diff --git a/.github/workflows/docker-image-deer-flow.yml b/.github/workflows/docker-image-deer-flow.yml
new file mode 100644
index 000000000..da6b26ffb
--- /dev/null
+++ b/.github/workflows/docker-image-deer-flow.yml
@@ -0,0 +1,64 @@
+name: Deer Flow Docker Image CI
+
+on:
+  push:
+    branches: [ "main" ]
+    paths:
+      - 'runtime/deer-flow/**'
+      - 'scripts/images/deer-flow-backend/**'
+      - 'scripts/images/deer-flow-frontend/**'
+      - '.github/workflows/docker-image-deer-flow.yml'
+  pull_request:
+    branches: [ "main" ]
+    paths:
+      - 'runtime/deer-flow/**'
+      - 'scripts/images/deer-flow-backend/**'
+      - 'scripts/images/deer-flow-frontend/**'
+      - '.github/workflows/docker-image-deer-flow.yml'
+  workflow_dispatch:
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Login to GitHub Container Registry
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Set Docker Image Tag
+        id: set-tag
+        run: |
+          if [[ $GITHUB_REF == refs/tags/v* ]]; then
+            TAG=${GITHUB_REF#refs/tags/v}
+            echo "TAGS=$TAG" >> $GITHUB_OUTPUT
+          elif [[ $GITHUB_REF == refs/heads/main ]]; then
+            echo "TAGS=latest" >> $GITHUB_OUTPUT
+          else
+            echo "TAGS=temp" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Build Docker Image
+        run: |
+          make build-deer-flow VERSION=latest
+
+      - name: Tag Docker Image
+        run: |
+          LOWERCASE_REPO=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
+          docker tag deer-flow-backend:latest ghcr.io/$LOWERCASE_REPO/deer-flow-backend:${{ steps.set-tag.outputs.TAGS }}
+          docker tag deer-flow-frontend:latest ghcr.io/$LOWERCASE_REPO/deer-flow-frontend:${{ steps.set-tag.outputs.TAGS }}
+
+      - name: Push Docker Image
+        if: github.event_name != 'pull_request'
+        run: |
+          LOWERCASE_REPO=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
+          docker push ghcr.io/$LOWERCASE_REPO/deer-flow-backend:${{ steps.set-tag.outputs.TAGS }}
+          docker push ghcr.io/$LOWERCASE_REPO/deer-flow-frontend:${{ steps.set-tag.outputs.TAGS }}
diff --git a/Makefile b/Makefile
index f57014e27..ae87539da 100644
--- a/Makefile
+++ b/Makefile
@@ -4,6 +4,14 @@
 WITH_MINERU ?= false # do not build mineru by default
 VERSION ?= latest
 NAMESPACE ?= datamate
+ifdef COMSPEC
+	# Windows environment
+	MAKE := "C:/Program Files (x86)/GnuWin32/bin/make"
+else
+	# Linux/Mac environment
+	MAKE := make
+endif
+
 .PHONY: build-%
 build-%:
 	$(MAKE) $*-docker-build
@@ -76,15 +84,10 @@ label-studio-adapter-docker-build:
 .PHONY: deer-flow-docker-build
 deer-flow-docker-build:
-	@if [ -d "../deer-flow/.git" ]; then \
-		cd ../deer-flow && git pull; \
-	else \
-		git clone git@github.com:bytedance/deer-flow.git ../deer-flow; \
-	fi
-	sed -i "s/dark/light/g" "../deer-flow/web/src/components/deer-flow/theme-provider-wrapper.tsx"
-	cp -n deployment/docker/deer-flow/.env.example ../deer-flow/.env
-	cp -n deployment/docker/deer-flow/conf.yaml.example ../deer-flow/conf.yaml
-	cd ../deer-flow && docker compose build
+	cp -n runtime/deer-flow/.env.example runtime/deer-flow/.env
+	cp -n runtime/deer-flow/conf.yaml.example runtime/deer-flow/conf.yaml
+	docker build -t deer-flow-backend:$(VERSION) . -f scripts/images/deer-flow-backend/Dockerfile
+	docker build -t deer-flow-frontend:$(VERSION) .
-f scripts/images/deer-flow-frontend/Dockerfile .PHONY: mineru-docker-build mineru-docker-build: @@ -131,7 +134,7 @@ mineru-k8s-uninstall: .PHONY: datamate-docker-install datamate-docker-install: - cd deployment/docker/datamate && cp -n .env.example .env && docker compose -f docker-compose.yml up -d + cd deployment/docker/datamate && cp .env.example .env && docker compose -f docker-compose.yml up -d .PHONY: datamate-docker-uninstall datamate-docker-uninstall: @@ -139,8 +142,11 @@ datamate-docker-uninstall: .PHONY: deer-flow-docker-install deer-flow-docker-install: - cd deployment/docker/datamate && cp -n .env.deer-flow.example .env && docker compose -f docker-compose.yml up -d - cd deployment/docker/deer-flow && cp -n .env.example .env && cp -n conf.yaml.example conf.yaml && docker compose -f docker-compose.yml up -d + cd deployment/docker/datamate && cp .env.deer-flow.example .env && docker compose -f docker-compose.yml up -d + cp -n runtime/deer-flow/.env.example runtime/deer-flow/.env + cp -n runtime/deer-flow/conf.yaml.example runtime/deer-flow/conf.yaml + cp runtime/deer-flow/.env deployment/docker/deer-flow/.env && cp runtime/deer-flow/conf.yaml deployment/docker/deer-flow/conf.yaml + cd deployment/docker/deer-flow && docker compose -f docker-compose.yml up -d .PHONY: deer-flow-docker-uninstall deer-flow-docker-uninstall: @@ -158,3 +164,22 @@ datamate-k8s-install: create-namespace datamate-k8s-uninstall: helm uninstall datamate -n $(NAMESPACE) --ignore-not-found kubectl delete configmap datamate-init-sql -n $(NAMESPACE) --ignore-not-found + +.PHONY: deer-flow-k8s-install +deer-flow-k8s-install: + helm upgrade datamate deployment/helm/datamate/ -n $(NAMESPACE) --install --set global.deerFlow.enable=true + cp runtime/deer-flow/.env deployment/helm/deer-flow/charts/public/.env + cp runtime/deer-flow/conf.yaml deployment/helm/deer-flow/charts/public/conf.yaml + helm upgrade deer-flow deployment/helm/deer-flow -n $(NAMESPACE) --install + +.PHONY: deer-flow-k8s-uninstall +deer-flow-k8s-uninstall: + helm uninstall deer-flow -n $(NAMESPACE) --ignore-not-found + +.PHONY: milvus-k8s-install +milvus-k8s-install: + helm upgrade milvus deployment/helm/milvus -n $(NAMESPACE) --install + +.PHONY: milvus-k8s-uninstall +milvus-k8s-uninstall: + helm uninstall milvus -n $(NAMESPACE) --ignore-not-found \ No newline at end of file diff --git a/backend/services/main-application/src/main/resources/application.yml b/backend/services/main-application/src/main/resources/application.yml index 6c66d63b8..f70a819dd 100644 --- a/backend/services/main-application/src/main/resources/application.yml +++ b/backend/services/main-application/src/main/resources/application.yml @@ -164,8 +164,3 @@ datamate: quality-control: enabled: ${QC_ENABLED:true} threshold: ${QC_THRESHOLD:0.8} - - # RAG配置 - rag: - milvus-host: ${MILVUS_HOST:milvus-standalone} - milvus-port: ${MILVUS_PORT:19530} diff --git a/backend/services/operator-market-service/src/main/java/com/datamate/operator/application/OperatorService.java b/backend/services/operator-market-service/src/main/java/com/datamate/operator/application/OperatorService.java index 5cd1041f9..62fe42e30 100644 --- a/backend/services/operator-market-service/src/main/java/com/datamate/operator/application/OperatorService.java +++ b/backend/services/operator-market-service/src/main/java/com/datamate/operator/application/OperatorService.java @@ -32,7 +32,7 @@ public class OperatorService { private final FileService fileService; - @Value("${operator.base.path:/operator}") + 
@Value("${operator.base.path:/operators}") private String operatorBasePath; public List getOperators(Integer page, Integer size, List categories, diff --git a/backend/services/rag-indexer-service/src/main/java/com/datamate/rag/indexer/infrastructure/event/RagEtlService.java b/backend/services/rag-indexer-service/src/main/java/com/datamate/rag/indexer/infrastructure/event/RagEtlService.java index 75d4e5eef..a89495f1e 100644 --- a/backend/services/rag-indexer-service/src/main/java/com/datamate/rag/indexer/infrastructure/event/RagEtlService.java +++ b/backend/services/rag-indexer-service/src/main/java/com/datamate/rag/indexer/infrastructure/event/RagEtlService.java @@ -51,9 +51,9 @@ public class RagEtlService { private static final Semaphore SEMAPHORE = new Semaphore(10); - @Value("${datamate.rag.milvus-host}") + @Value("${datamate.rag.milvus-host:-milvus-standalone}") private String milvusHost; - @Value("${datamate.rag.milvus-port}") + @Value("${datamate.rag.milvus-port:-19530}") private int milvusPort; private final RagFileRepository ragFileRepository; @@ -122,7 +122,7 @@ private void processRagFile(RagFile ragFile, DataInsertedEvent event) { // 调用嵌入模型获取嵌入向量 List content = embeddingModel.embedAll(split).content(); // 存储嵌入向量到 Milvus - embeddingStore(embeddingModel, ragFile.getKnowledgeBaseId()).addAll(content, split); + embeddingStore(embeddingModel, event.knowledgeBase().getName()).addAll(content, split); } /** @@ -152,11 +152,11 @@ public DocumentSplitter documentSplitter(ProcessType processType) { }; } - public EmbeddingStore embeddingStore(EmbeddingModel embeddingModel, String knowledgeBaseId) { + public EmbeddingStore embeddingStore(EmbeddingModel embeddingModel, String knowledgeBaseName) { return MilvusEmbeddingStore.builder() .host(milvusHost) .port(milvusPort) - .collectionName("datamate_" + knowledgeBaseId) + .collectionName(knowledgeBaseName) .dimension(embeddingModel.dimension()) .build(); } diff --git a/deployment/helm/datamate/charts/backend/values.yaml b/deployment/helm/datamate/charts/backend/values.yaml index cf4295456..681af058c 100644 --- a/deployment/helm/datamate/charts/backend/values.yaml +++ b/deployment/helm/datamate/charts/backend/values.yaml @@ -19,13 +19,7 @@ imagePullSecrets: [] nameOverride: "datamate-backend" fullnameOverride: "datamate-backend" -env: - - name: namespace - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SPRING_CONFIG_LOCATION - value: file:/opt/backend/application.yml +env: [] # This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ serviceAccount: diff --git a/deployment/helm/datamate/charts/frontend/templates/configmap.yaml b/deployment/helm/datamate/charts/frontend/templates/configmap.yaml index dd1c3041c..a1d81a6d7 100644 --- a/deployment/helm/datamate/charts/frontend/templates/configmap.yaml +++ b/deployment/helm/datamate/charts/frontend/templates/configmap.yaml @@ -1,3 +1,4 @@ +{{- if .Values.global.deerFlow.enable }} apiVersion: v1 kind: ConfigMap metadata: @@ -13,6 +14,8 @@ data: client_max_body_size 1024M; + add_header Set-Cookie "NEXT_LOCALE=zh"; + location /api/ { proxy_pass http://datamate-backend:8080/api/; proxy_set_header Host $host; @@ -20,9 +23,63 @@ data: proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } + location /chat { + proxy_pass http://deer-flow-frontend:3000/chat; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + 
+ location /_next { + proxy_pass http://deer-flow-frontend:3000/_next; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location /deer-flow-backend/ { + proxy_pass http://deer-flow-backend:8000/api/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location / { + if ($query_string ~* "_rsc=pmmii") { + proxy_pass http://deer-flow-frontend:3000; + break; + } + root /opt/frontend; try_files $uri $uri/ /index.html; } } +{{- else }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: datamate-nginx-conf +data: + backend.conf: | + server { + listen 80; + server_name 0.0.0.0; + access_log /var/log/datamate/frontend/access.log main; + error_log /var/log/datamate/frontend/error.log notice; + + client_max_body_size 1024M; + + location /api/ { + proxy_pass http://datamate-backend:8080/api/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location / { + root /opt/frontend; + try_files $uri $uri/ /index.html; + } + } +{{- end }} \ No newline at end of file diff --git a/deployment/helm/datamate/charts/frontend/templates/deployment.yaml b/deployment/helm/datamate/charts/frontend/templates/deployment.yaml index 1aa5c4fa9..c8ddbc443 100644 --- a/deployment/helm/datamate/charts/frontend/templates/deployment.yaml +++ b/deployment/helm/datamate/charts/frontend/templates/deployment.yaml @@ -13,10 +13,11 @@ spec: {{- include "frontend.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} annotations: + checksum/nginx: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} labels: {{- include "frontend.labels" . | nindent 8 }} {{- with .Values.podLabels }} diff --git a/deployment/helm/deer-flow/.helmignore b/deployment/helm/deer-flow/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/helm/deer-flow/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/deer-flow/Chart.yaml b/deployment/helm/deer-flow/Chart.yaml new file mode 100644 index 000000000..930fd6bed --- /dev/null +++ b/deployment/helm/deer-flow/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: deer-flow +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.0.1" diff --git a/deployment/helm/deer-flow/charts/backend/.helmignore b/deployment/helm/deer-flow/charts/backend/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/deer-flow/charts/backend/Chart.yaml b/deployment/helm/deer-flow/charts/backend/Chart.yaml new file mode 100644 index 000000000..82c00868d --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: backend +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.0.1" diff --git a/deployment/helm/deer-flow/charts/backend/templates/_helpers.tpl b/deployment/helm/deer-flow/charts/backend/templates/_helpers.tpl new file mode 100644 index 000000000..29d49ada7 --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/templates/_helpers.tpl @@ -0,0 +1,75 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "backend.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "backend.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "backend.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "backend.labels" -}} +helm.sh/chart: {{ include "backend.chart" . }} +{{ include "backend.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "backend.selectorLabels" -}} +app.kubernetes.io/name: {{ include "backend.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "backend.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "backend.fullname" .) .Values.serviceAccount.name -}} +{{- else }} +{{- default "default" .Values.serviceAccount.name -}} +{{- end }} +{{- end }} + +{{/* +Name of image +*/}} +{{- define "backend.image" -}} +{{- $name := default .Values.image.repository .Values.global.image.backend.name }} +{{- $tag := default .Values.image.tag .Values.global.image.backend.tag }} +{{- if .Values.global.image.repository }} +{{- .Values.global.image.repository | trimSuffix "/" }}/{{ $name }}:{{ $tag }} +{{- else }} +{{- $name }}:{{ $tag }} +{{- end }} +{{- end }} diff --git a/deployment/helm/deer-flow/charts/backend/templates/deployment.yaml b/deployment/helm/deer-flow/charts/backend/templates/deployment.yaml new file mode 100644 index 000000000..b7290fe35 --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "backend.fullname" . }} + labels: + {{- include "backend.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "backend.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "backend.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "backend.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ include "backend.image" . }}" + imagePullPolicy: {{ default .Values.global.image.pullPolicy .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.envFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/deployment/helm/deer-flow/charts/backend/templates/service.yaml b/deployment/helm/deer-flow/charts/backend/templates/service.yaml new file mode 100644 index 000000000..4477c220a --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "backend.fullname" . }} + labels: + {{- include "backend.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ .Chart.Name }} + selector: + {{- include "backend.selectorLabels" . | nindent 4 }} diff --git a/deployment/helm/deer-flow/charts/backend/templates/serviceaccount.yaml b/deployment/helm/deer-flow/charts/backend/templates/serviceaccount.yaml new file mode 100644 index 000000000..0977e1633 --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "backend.serviceAccountName" . }} + labels: + {{- include "backend.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/deployment/helm/deer-flow/charts/backend/values.yaml b/deployment/helm/deer-flow/charts/backend/values.yaml new file mode 100644 index 000000000..931f6632a --- /dev/null +++ b/deployment/helm/deer-flow/charts/backend/values.yaml @@ -0,0 +1,111 @@ +# Default values for datamate. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ +replicaCount: 1 + +# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/ +image: + repository: "deer-flow-backend" + # This sets the pull policy for images. + pullPolicy: "IfNotPresent" + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# This is to override the chart name. 
+nameOverride: "deer-flow-backend" +fullnameOverride: "deer-flow-backend" + +envFrom: [] +env: [] + +confYaml: {} + +# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +# This is for setting Kubernetes Annotations to a Pod. +# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} +# This is for setting Kubernetes Labels to a Pod. +# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/ +service: + # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: ClusterIP + # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports + port: 8000 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +# livenessProbe: +# httpGet: +# path: / +# port: http +# readinessProbe: +# httpGet: +# path: / +# port: http + +# This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/deployment/helm/deer-flow/charts/frontend/.helmignore b/deployment/helm/deer-flow/charts/frontend/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/deer-flow/charts/frontend/Chart.yaml b/deployment/helm/deer-flow/charts/frontend/Chart.yaml new file mode 100644 index 000000000..98025e322 --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/Chart.yaml @@ -0,0 +1,29 @@ +apiVersion: v2 +name: frontend +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.0.1" + +dependencies: + - name: backend + repository: file://../backend + version: 0.0.1 diff --git a/deployment/helm/deer-flow/charts/frontend/templates/_helpers.tpl b/deployment/helm/deer-flow/charts/frontend/templates/_helpers.tpl new file mode 100644 index 000000000..8bc9b2e54 --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/templates/_helpers.tpl @@ -0,0 +1,75 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "frontend.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "frontend.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "frontend.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "frontend.labels" -}} +helm.sh/chart: {{ include "frontend.chart" . }} +{{ include "frontend.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "frontend.selectorLabels" -}} +app.kubernetes.io/name: {{ include "frontend.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "frontend.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "frontend.fullname" .) .Values.serviceAccount.name -}} +{{- else }} +{{- default "default" .Values.serviceAccount.name -}} +{{- end }} +{{- end }} + +{{/* +Name of image +*/}} +{{- define "frontend.image" -}} +{{- $name := default .Values.image.repository .Values.global.image.frontend.name }} +{{- $tag := default .Values.image.tag .Values.global.image.frontend.tag }} +{{- if .Values.global.image.repository }} +{{- .Values.global.image.repository | trimSuffix "/" }}/{{ $name }}:{{ $tag }} +{{- else }} +{{- $name }}:{{ $tag }} +{{- end }} +{{- end }} diff --git a/deployment/helm/deer-flow/charts/frontend/templates/deployment.yaml b/deployment/helm/deer-flow/charts/frontend/templates/deployment.yaml new file mode 100644 index 000000000..a5f2af15f --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "frontend.fullname" . }} + labels: + {{- include "frontend.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "frontend.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "frontend.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "frontend.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ include "frontend.image" . }}" + imagePullPolicy: {{ default .Values.global.image.pullPolicy .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.envFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/deployment/helm/deer-flow/charts/frontend/templates/service.yaml b/deployment/helm/deer-flow/charts/frontend/templates/service.yaml new file mode 100644 index 000000000..dfa59a5f0 --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "frontend.fullname" . }} + labels: + {{- include "frontend.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.port }} + protocol: TCP + name: {{ .Chart.Name }} + selector: + {{- include "frontend.selectorLabels" . | nindent 4 }} diff --git a/deployment/helm/deer-flow/charts/frontend/templates/serviceaccount.yaml b/deployment/helm/deer-flow/charts/frontend/templates/serviceaccount.yaml new file mode 100644 index 000000000..22facbdb0 --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "frontend.serviceAccountName" . }} + labels: + {{- include "frontend.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/deployment/helm/deer-flow/charts/frontend/values.yaml b/deployment/helm/deer-flow/charts/frontend/values.yaml new file mode 100644 index 000000000..96fa21161 --- /dev/null +++ b/deployment/helm/deer-flow/charts/frontend/values.yaml @@ -0,0 +1,109 @@ +# Default values for datamate. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ +replicaCount: 1 + +# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/ +image: + repository: "deer-flow-frontend" + # This sets the pull policy for images. + pullPolicy: "IfNotPresent" + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# This is to override the chart name. +nameOverride: "deer-flow-frontend" +fullnameOverride: "deer-flow-frontend" + +envFrom: [] +env: [] + +# This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +# This is for setting Kubernetes Annotations to a Pod. +# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} +# This is for setting Kubernetes Labels to a Pod. 
+# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/ +service: + # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: ClusterIP + # This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports + port: 3000 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +# livenessProbe: +# httpGet: +# path: / +# port: http +# readinessProbe: +# httpGet: +# path: / +# port: http + +# This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/ +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/deployment/helm/deer-flow/charts/public/.helmignore b/deployment/helm/deer-flow/charts/public/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deployment/helm/deer-flow/charts/public/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/deer-flow/charts/public/Chart.yaml b/deployment/helm/deer-flow/charts/public/Chart.yaml new file mode 100644 index 000000000..4e089ca64 --- /dev/null +++ b/deployment/helm/deer-flow/charts/public/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: public +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.0.1" diff --git a/deployment/helm/deer-flow/charts/public/templates/secret.yaml b/deployment/helm/deer-flow/charts/public/templates/secret.yaml new file mode 100644 index 000000000..f94949625 --- /dev/null +++ b/deployment/helm/deer-flow/charts/public/templates/secret.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Secret +metadata: + name: deer-flow-env +stringData: + {{- range .Files.Lines ".env" }} + {{- $line := trim . }} + {{- if and $line (not (hasPrefix "#" $line)) }} + {{- $pair := regexSplit "=" $line 2 }} + {{- if eq (len $pair) 2 }} + {{ index $pair 0 | trim }}: {{ index $pair 1 | trim | quote }} + {{- end }} + {{- end }} + {{- end }} + +--- +apiVersion: v1 +kind: Secret +metadata: + name: deer-flow-conf +stringData: +{{ (.Files.Glob "conf.yaml").AsConfig | indent 2 }} \ No newline at end of file diff --git a/deployment/helm/deer-flow/charts/public/values.yaml b/deployment/helm/deer-flow/charts/public/values.yaml new file mode 100644 index 000000000..85d8efac8 --- /dev/null +++ b/deployment/helm/deer-flow/charts/public/values.yaml @@ -0,0 +1,3 @@ +# Default values for datamate. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. diff --git a/deployment/helm/deer-flow/values.yaml b/deployment/helm/deer-flow/values.yaml new file mode 100644 index 000000000..c070d2593 --- /dev/null +++ b/deployment/helm/deer-flow/values.yaml @@ -0,0 +1,34 @@ +# Default values for deer-flow. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + image: + repository: "" + pullPolicy: "IfNotPresent" + backend: + name: "deer-flow-backend" + tag: "latest" + frontend: + name: "deer-flow-frontend" + tag: "latest" + +envFrom: &envFrom + secretRef: + name: deer-flow-env + +backend: + envFrom: + - *envFrom + volumes: + - name: deer-flow-conf + secret: + secretName: deer-flow-conf + volumeMounts: + - name: deer-flow-conf + mountPath: /app/conf.yaml + subPath: conf.yaml + +frontend: + envFrom: + - *envFrom diff --git a/deployment/helm/milvus/.helmignore b/deployment/helm/milvus/.helmignore new file mode 100644 index 000000000..a9fe72788 --- /dev/null +++ b/deployment/helm/milvus/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/deployment/helm/milvus/Chart.yaml b/deployment/helm/milvus/Chart.yaml new file mode 100644 index 000000000..c77c26aa7 --- /dev/null +++ b/deployment/helm/milvus/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +appVersion: 2.6.4 +description: Milvus is an open-source vector database built to power AI applications + and vector similarity search. +home: https://milvus.io/ +icon: https://raw.githubusercontent.com/milvus-io/docs/master/v1.0.0/assets/milvus_logo.png +keywords: +- milvus +- elastic +- vector +- search +- deploy +kubeVersion: ^1.10.0-0 +maintainers: +- email: contact@milvus.io + name: contact + url: milvus.io +name: milvus +sources: +- https://github.com/zilliztech/milvus +version: 5.0.6 diff --git a/deployment/helm/milvus/README-TEI.md b/deployment/helm/milvus/README-TEI.md new file mode 100644 index 000000000..e104fce6c --- /dev/null +++ b/deployment/helm/milvus/README-TEI.md @@ -0,0 +1,209 @@ +# Text Embeddings Inference (TEI) Integration Guide + +This document describes how to use Text Embeddings Inference (TEI) service with Milvus Helm Chart, and how to integrate TEI with Milvus. TEI is an open-source project developed by Hugging Face, available at [https://github.com/huggingface/text-embeddings-inference](https://github.com/huggingface/text-embeddings-inference). + +## Overview + +Text Embeddings Inference (TEI) is a high-performance text embedding model inference service that converts text into vector representations. Milvus is a vector database that can store and retrieve these vectors. By combining the two, you can build powerful semantic search and retrieval systems. + +## Deployment Methods + +This guide provides two ways to use TEI: +1. Deploy TEI service directly through the Milvus Helm Chart +2. Use external TEI service with Milvus integration + +## Deploy TEI through Milvus Helm Chart + +### Basic Configuration + +Enable TEI service in `values.yaml`: + +```yaml +tei: + enabled: true + modelId: "BAAI/bge-large-en-v1.5" # Specify the model to use +``` + +This is the simplest configuration, just specify `enabled: true` and the desired `modelId`. 
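+For example, assuming the chart in this repository is installed as release `milvus` from `deployment/helm/milvus` (the path and default namespace used by the `milvus-k8s-install` Makefile target; the names here are illustrative), the same two values can be applied with `--set` flags as a minimal sketch:
+
+```bash
+# Enable the bundled TEI service on a Milvus release.
+# Chart path, release name, and namespace follow this repository's Makefile; adjust to your setup.
+helm upgrade --install milvus deployment/helm/milvus \
+  --namespace datamate \
+  --set tei.enabled=true \
+  --set tei.modelId="BAAI/bge-large-en-v1.5"
+```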
+ +### Complete Configuration Options + +```yaml +tei: + enabled: true # Enable TEI service + name: text-embeddings-inference # Service name + replicas: 1 # Number of TEI replicas + image: + repository: ghcr.io/huggingface/text-embeddings-inference # Image repository + tag: cpu-1.6 # Image tag (CPU version) + pullPolicy: IfNotPresent # Image pull policy + service: + type: ClusterIP # Service type + port: 8080 # Service port + annotations: {} # Service annotations + labels: {} # Service labels + resources: # Resource configuration + requests: + cpu: "4" # CPU request + memory: "8Gi" # Memory request + limits: + cpu: "8" # CPU limit + memory: "16Gi" # Memory limit + persistence: # Persistence storage configuration + enabled: true # Enable persistence storage + mountPath: "/data" # Mount path + annotations: {} # Storage annotations + persistentVolumeClaim: # PVC configuration + existingClaim: "" # Use existing PVC + storageClass: # Storage class + accessModes: ReadWriteOnce # Access modes + size: 50Gi # Storage size + subPath: "" # Sub path + modelId: "BAAI/bge-large-en-v1.5" # Model ID + extraArgs: [] # Additional command line arguments for TEI, such as "--max-batch-tokens=16384", "--max-client-batch-size=32", "--max-concurrent-requests=128", etc. + nodeSelector: {} # Node selector + affinity: {} # Affinity configuration + tolerations: [] # Tolerations + topologySpreadConstraints: [] # Topology spread constraints + extraEnv: [] # Additional environment variables +``` + +### Using GPU Acceleration + +If you have GPU resources, you can use the GPU version of the TEI image to accelerate inference: + +```yaml +tei: + enabled: true + modelId: "BAAI/bge-large-en-v1.5" + image: + repository: ghcr.io/huggingface/text-embeddings-inference + tag: 1.6 # GPU version + resources: + limits: + nvidia.com/gpu: 1 # Allocate 1 GPU +``` + + +## Frequently Asked Questions + +### How to determine the embedding dimension of a model? + +Different models have different embedding dimensions. Here are the dimensions of some commonly used models: +- BAAI/bge-large-en-v1.5: 1024 +- BAAI/bge-base-en-v1.5: 768 +- nomic-ai/nomic-embed-text-v1: 768 +- sentence-transformers/all-mpnet-base-v2: 768 + +You can find this information in the model's documentation or get it through the TEI service's API. + +### How to test if the TEI service is working properly? + +After deploying the TEI service, you can use the following commands to test if the service is working properly: + +```bash +# Get the TEI service endpoint +export TEI_SERVICE=$(kubectl get svc -l component=text-embeddings-inference -o jsonpath='{.items[0].metadata.name}') + +# Test the embedding functionality +kubectl run -it --rm curl --image=curlimages/curl -- curl -X POST "http://${TEI_SERVICE}:8080/embed" \ + -H "Content-Type: application/json" \ + -d '{"inputs":"This is a test text"}' +``` + +### How to use TEI-generated embeddings in Milvus? + +In Milvus, you can use TEI-generated embeddings for the following operations: + +1. When creating a collection, specify the vector dimension to match the TEI model output dimension +2. Before inserting data, use the TEI service to convert text to vectors +3. When searching, similarly use the TEI service to convert query text to vectors + +## Using Milvus Text Embedding Function + +Milvus provides a text embedding function feature that allows you to generate vector embeddings directly within Milvus. You can configure Milvus to use TEI as the backend for this function. + +### Using the Text Embedding Function in Milvus + +1. 
Specify the embedding function when creating a collection: + +```python +from pymilvus import MilvusClient, DataType, Function, FunctionType + +# Connect to Milvus +client = MilvusClient(uri="http://localhost:19530") + +# 1. Create schema +schema = client.create_schema() + +# 2. Add fields +schema.add_field("id", DataType.INT64, is_primary=True, auto_id=False) # Primary key +schema.add_field("text", DataType.VARCHAR, max_length=65535) # Text field +schema.add_field("embedding", DataType.FLOAT_VECTOR, dim=768) # Vector field, dimension must match TEI model output + +# 3. Define TEI embedding function +tei_embedding_function = Function( + name="tei_func", # Unique identifier for this embedding function + function_type=FunctionType.TEXTEMBEDDING, # Indicates a text embedding function + input_field_names=["text"], # Scalar field(s) containing text data to embed + output_field_names=["embedding"], # Vector field(s) for storing embeddings + params={ # TEI specific parameters + "provider": "TEI", # Must be set to "TEI" + "endpoint": "http://tei-service:8080", # TEI service address + # Optional: "api_key": "your_secure_api_key", + # Optional: "truncate": "true", + # Optional: "truncation_direction": "right", + # Optional: "max_client_batch_size": 64, + # Optional: "ingestion_prompt": "passage: ", + # Optional: "search_prompt": "query: " + } +) +schema.add_function(tei_embedding_function) + +# 4. Create collection with schema and index param +index_params = client.prepare_index_params() +index_params.add_index( + field_name="embedding", + index_name="embedding_index", + index_type="HNSW", + metric_type="COSINE", +) +client.create_collection( + name="test_collection", + schema=schema, + index_params=index_params +) +client.load_collection( + collection_name="test_collection" +) + +``` + +2. Automatically generate embeddings when inserting data + +```python +# Insert data, Milvus will automatically call the TEI service to generate embedding vectors +client.insert( + collection_name="test_collection", + data=[ + {"id": 1, "text": "This is a sample document about artificial intelligence."}, + {"id": 2, "text": "Vector databases are designed to handle embeddings efficiently."} + ] +) +``` + +3. Automatically generate query embeddings when searching: + +```python +# Search directly using text, Milvus will automatically call the TEI service to generate query vectors +results = client.search( + collection_name="test_collection", + data=["Tell me about AI technology"], + anns_field="embedding", + search_params={ + "metric_type": "COSINE", + "params": {} + }, + limit=3 +) +``` diff --git a/deployment/helm/milvus/README.md b/deployment/helm/milvus/README.md new file mode 100644 index 000000000..85a2cea5e --- /dev/null +++ b/deployment/helm/milvus/README.md @@ -0,0 +1,679 @@ +# Milvus Helm Chart + +For more information about installing and using Helm, see the [Helm Docs](https://helm.sh/docs/). For a quick introduction to Charts, see the [Chart Guide](https://helm.sh/docs/topics/charts/). + +To install Milvus, refer to [Milvus installation](https://milvus.io/docs/v2.0.x/install_cluster-helm.md). + +## Introduction +This chart bootstraps Milvus deployment on a Kubernetes cluster using the Helm package manager. 
+ +## Prerequisites + +- Kubernetes >= 1.20.0 +- Helm >= 3.14.0 + +## Compatibility Notice + +- **IMPORTANT** As of helm version 5.0.0, significant architectural changes have been introduced in Milvus v2.6.0: + - Coordinator consolidation: Legacy separate coordinators (dataCoord, queryCoord, indexCoord) have been consolidated into a single mixCoord + - New components: Introduction of Streaming Node for enhanced data processing + - Component removal: indexNode has been removed and consolidated + - Milvus v2.6.0-rc1 is not compatible with v2.6.x. Direct upgrades from release candidates are not supported. + - You must upgrade to v2.5.16 with mixCoordinator enabled before upgrading to v2.6.x. + +- **IMPORTANT** For users using pulsar2. Please don't use version 4.2.21~4.2.29 for upgrading, there're some known issues. 4.2.30 or later version is recommended. Remember to add `--set pulsar.enabled=true,pulsarv3.enabled=false` or set them in your values file when upgrading. + +- As of version 4.2.21, the Milvus Helm chart introduced pulsar-v3.x chart as dependency. For backward compatibility, please upgrade your helm to v3.14 or later version, and be sure to add the `--reset-then-reuse-values` option whenever you use `helm upgrade`. + +- As of version 4.2.0, the Milvus Helm chart no longer supports Milvus v2.3.x. If you need to deploy Milvus v2.3.x using Helm, please use Milvus Helm chart version less than 4.2.0 (e.g 4.1.36). + +> **IMPORTANT** The master branch is for the development of Milvus v2.x. On March 9th, 2021, we released Milvus v1.0, the first stable version of Milvus with long-term support. To use Milvus v1.x, switch to [branch 1.1](https://github.com/zilliztech/milvus-helm/tree/1.1). + +## Install the Chart + +1. Add the stable repository +```bash +$ helm repo add zilliztech https://zilliztech.github.io/milvus-helm/ +``` + +2. Update charts repositories +``` +$ helm repo update +``` + +### Deploy Milvus with standalone mode + +Assume the release name is `my-release`: + +```bash +# Helm v3.x +$ helm upgrade --install my-release --set cluster.enabled=false --set etcd.replicaCount=1 --set pulsarv3.enabled=false --set minio.mode=standalone zilliztech/milvus +``` +By default, milvus standalone uses `woodpecker` as message queue. You can also use `pulsar` or `kafka` as message queue: + +```bash +# Helm v3.x +# Milvus Standalone with pulsar as message queue +$ helm upgrade --install my-release --set cluster.enabled=false --set standalone.messageQueue=pulsar --set etcd.replicaCount=1 --set pulsarv3.enabled=true --set minio.mode=standalone zilliztech/milvus +``` + +```bash +# Helm v3.x +# Milvus Standalone with kafka as message queue +$ helm upgrade --install my-release --set cluster.enabled=false --set standalone.messageQueue=kafka --set etcd.replicaCount=1 --set pulsarv3.enabled=false --set kafka.enabled=true --set minio.mode=standalone zilliztech/milvus +``` +If you need to use standalone mode with embedded ETCD and local storage (without starting MinIO and additional ETCD), you can use the following steps: + +1. Prepare a values file +``` +cat > values.yaml < **Tip**: To list all releases, using `helm list`. + +### Deploy Milvus with cluster mode + +Assume the release name is `my-release`: + +```bash +# Helm v3.x +$ helm upgrade --install my-release zilliztech/milvus +``` +By default, milvus cluster uses `pulsar` as message queue. 
You can also use `kafka` instead of `pulsar` for milvus cluster: + +```bash +# Helm v3.x +$ helm upgrade --install my-release zilliztech/milvus --set pulsarv3.enabled=false --set kafka.enabled=true +``` + +By default, milvus cluster uses `mixCoordinator` which contains all coordinators. This is the recommended deployment approach for Milvus v2.6.x and later versions. + +### Upgrading from Milvus v2.5.x to v2.6.x + +Upgrading from Milvus 2.5.x to 2.6.x involves significant architectural changes. Please follow these steps carefully: + +#### Requirements + +**System requirements:** +- Helm version >= 3.14.0 +- Kubernetes version >= 1.20.0 +- Milvus cluster deployed via Helm Chart + +**Compatibility requirements:** +- Milvus v2.6.0-rc1 is not compatible with v2.6.x. Direct upgrades from release candidates are not supported. +- If you are currently running v2.6.0-rc1 and need to preserve your data, please refer to community guides for migration assistance. +- You must upgrade to v2.5.16 with mixCoordinator enabled before upgrading to v2.6.x. + +#### Upgrade Process + +**Step 1: Upgrade Helm Chart** + +First, upgrade your Milvus Helm chart repository: + +```bash +helm repo add zilliztech https://zilliztech.github.io/milvus-helm +helm repo update zilliztech +``` + +**Step 2: Upgrade to v2.5.16 with mixCoordinator** + +Check if your cluster currently uses separate coordinators: + +```bash +kubectl get pods +``` + +If you see separate coordinator pods (datacoord, querycoord, indexcoord), upgrade to v2.5.16 and enable mixCoordinator: + +```bash +helm upgrade my-release zilliztech/milvus \ + --set image.all.tag="v2.5.16" \ + --set mixCoordinator.enabled=true \ + --set rootCoordinator.enabled=false \ + --set indexCoordinator.enabled=false \ + --set queryCoordinator.enabled=false \ + --set dataCoordinator.enabled=false \ + --reset-then-reuse-values \ + --version=4.2.58 +``` + +If your cluster already uses mixCoordinator, simply upgrade the image: + +```bash +helm upgrade my-release zilliztech/milvus \ + --set image.all.tag="v2.5.16" \ + --reset-then-reuse-values \ + --version=4.2.58 +``` + +**Step 3: Upgrade to v2.6.x** + +Once v2.5.16 is running successfully with mixCoordinator, upgrade to v2.6.x: + +```bash +helm upgrade my-release zilliztech/milvus \ + --set image.all.tag="v2.6.x" \ + --set streaming.enabled=true \ + --set indexNode.enabled=false \ + --reset-then-reuse-values \ + --version=5.0.0 +``` + +### Upgrade an existing Milvus cluster (General) + +> **IMPORTANT** If you have installed a milvus cluster with version below v2.1.x, you need follow the instructions at here: https://github.com/milvus-io/milvus/blob/master/deployments/migrate-meta/README.md. After meta migration, you use `helm upgrade` to update your cluster again. + +E.g. to scale out query node from 1(default) to 2: + +```bash +# Helm v3.14.0+ +$ helm upgrade --reset-then-reuse-values --install --set queryNode.replicas=2 my-release zilliztech/milvus +``` + +### Milvus Mix Coordinator Active Standby +> **IMPORTANT** Milvus supports deploying multiple Mix Coordinator instances for high availability. For example, you can run a Milvus cluster with two mixcoord pods: + +```bash +helm upgrade --install my-release zilliztech/milvus --set mixCoordinator.activeStandby.enabled=true --set mixCoordinator.replicas=2 +``` + +### Breaking Changes + +> **IMPORTANT** Milvus helm chart 4.0.0 has breaking changes for milvus configuration. Previously, you can set segment size like this `--set dataCoordinator.segment.maxSize=1024`. 
Now we have removed all the shortcut config options. Instead, you can set them using `extraConfigFiles` like this:
+```bash
+extraConfigFiles:
+  user.yaml: |+
+    dataCoord:
+      segment:
+        maxSize: 1024
+```
+
+So if you had deployed a cluster with a helm chart version below 4.0.0 and also specified extra config, you need to set the configs under `extraConfigFiles` when running `helm upgrade`.
+
+> **IMPORTANT** Milvus has removed mysql as a meta store option since v2.3.1, and the milvus helm chart has removed mysql as a dependency since chart version 4.1.8.
+
+### Enable log to file
+
+By default, all the logs of milvus components are written to stdout. If you want to log to files instead, install milvus with `--set log.persistence.enabled=true`. Note that you should have a storageclass with the `ReadWriteMany` access mode.
+
+```bash
+# Install a milvus cluster with file log output
+helm install my-release zilliztech/milvus --set log.persistence.enabled=true --set log.persistence.persistentVolumeClaim.storageClass=
+```
+
+Logs will be written to the `/milvus/logs/` directory.
+
+### Enable proxy TLS connection
+By default, the TLS connection to the proxy service is disabled. To enable TLS with your own certificate and private key, specify them in `extraConfigFiles` like this:
+
+```bash
+extraConfigFiles:
+  user.yaml: |+
+    # Enable tlsMode and set the tls cert and key
+    tls:
+      serverPemPath: /etc/milvus/certs/tls.crt
+      serverKeyPath: /etc/milvus/certs/tls.key
+    common:
+      security:
+        tlsMode: 1
+```
+The paths specified above are TLS secret data mounted inside the proxy pod as files. To create a TLS secret, set `proxy.tls.enabled` to `true`, then provide base64-encoded values for your certificate and private key files in values.yaml:
+
+```bash
+proxy:
+  enabled: true
+  tls:
+    enabled: true
+    secretName: milvus-tls
+    # expecting base64 encoded values here: i.e. $(cat tls.crt | base64 -w 0) and $(cat tls.key | base64 -w 0)
+    key: LS0tLS1C....
+    crt: LS0tLS1CR...
+```
+or on the CLI using `--set`:
+
+```bash
+  --set proxy.tls.enabled=true \
+  --set proxy.tls.key=$(cat /path/to/private_key_file | base64 -w 0) \
+  --set proxy.tls.crt=$(cat /path/to/certificate_file | base64 -w 0)
+```
+In case you want to use a different `secretName` or mount path inside the pod, modify `proxy.tls.secretName` above, as well as `serverPemPath` and `serverKeyPath` in `extraConfigFiles` accordingly, then update the `volumes` and `volumeMounts` sections in values.yaml:
+
+```bash
+  volumes:
+  - secret:
+      secretName: Your-tls-secret-name
+    name: milvus-tls
+  volumeMounts:
+  - mountPath: /Your/tls/files/path/
+    name: milvus-tls
+```
+
+## Milvus with External Object Storage
+
+As of https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z, the MinIO Gateway and the related filesystem mode code have been removed. It is now recommended to utilize the `externalS3` configuration for integrating with various object storage services. Notably, Milvus now provides support for popular object storage platforms such as AWS S3, GCP GCS, Azure Blob, Aliyun OSS and Tencent COS.
+
+The recommended configuration option for `externalS3.cloudProvider` includes the following choices: `aws`, `gcp`, `azure`, `aliyun`, and `tencent`.
Here's an example to use AWS S3 for Milvus object storage: + +``` +minio: + enabled: false +externalS3: + enabled: true + cloudProvider: aws + host: s3.aws.com + port: 443 + useSSL: true + bucketName: + accessKey: + secretKey: +``` + + +## Uninstall the Chart + +```bash +# Helm v3.x +$ helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +- Completely uninstall Milvus + +> **IMPORTANT** Please run this command with care. Maybe you want to keep ETCD data +```bash +MILVUS_LABELS="app.kubernetes.io/instance=my-release" +kubectl delete pvc $(kubectl get pvc -l "${MILVUS_LABELS}" -o jsonpath='{range.items[*]}{.metadata.name} ') +``` + +## Configuration + +### Milvus Service Configuration + +The following table lists the configurable parameters of the Milvus Service and their default values. + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `cluster.enabled` | Enable or disable Milvus Cluster mode | `true` | +| `route.enabled` | Enable OpenShift route | `false` | +| `route.hostname` | Hostname for the OpenShift route | `""` | +| `route.path` | Path for the OpenShift route | `""` | +| `route.termination` | TLS termination for the route | `edge` | +| `route.annotations` | Route annotations | `{}` | +| `route.labels` | Route labels | `{}` | +| `image.all.repository` | Image repository | `milvusdb/milvus` | +| `image.all.tag` | Image tag | `v2.6.4` | +| `image.all.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.all.pullSecrets` | Image pull secrets | `{}` | +| `image.tools.repository` | Config image repository | `milvusdb/milvus-config-tool` | +| `image.tools.tag` | Config image tag | `v0.1.2` | +| `image.tools.pullPolicy` | Config image pull policy | `IfNotPresent` | +| `customConfigMap` | User specified ConfigMap for configuration | +| `extraConfigFiles` | Extra config to override default milvus.yaml | `user.yaml:` | +| `service.type` | Service type | `ClusterIP` | +| `service.port` | Port where service is exposed | `19530` | +| `service.portName` | Useful for [Istio protocol selection](https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/) | `milvus` | +| `service.nodePort` | Service nodePort | `unset` | +| `service.annotations` | Service annotations | `{}` | +| `service.labels` | Service custom labels | `{}` | +| `service.clusterIP` | Internal cluster service IP | `unset` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `unset` | +| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | Service external IP addresses | `[]` | +| `ingress.enabled` | If true, Ingress will be created | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Ingress labels | `{}` | +| `ingress.rules` | Ingress rules | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `serviceAccount.create` | Create a custom service account | `false` | +| `serviceAccount.name` | Service Account name | `milvus` | +| `serviceAccount.annotations` | Service Account Annotations | `{}` | +| `serviceAccount.labels` | Service Account labels | `{}` | +| `metrics.enabled` | Export Prometheus monitoring metrics | `true` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor for Prometheus operator | `false` | +| 
`metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `unset` | +| `log.level` | Logging level to be used. Valid levels are `debug`, `info`, `warn`, `error`, `fatal` | `info` | +| `log.file.maxSize` | The size limit of the log file (MB) | `300` | +| `log.file.maxAge` | The maximum number of days that the log is retained. (day) | `10` | +| `log.file.maxBackups` | The maximum number of retained logs. | `20` | +| `log.format` | Format used for the logs. Valid formats are `text` and `json` | `text` | +| `log.persistence.enabled` | Use persistent volume to store Milvus logs data | `false` | +| `log.persistence.mountPath` | Milvus logs data persistence volume mount path | `/milvus/logs` | +| `log.persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `log.persistence.persistentVolumeClaim.existingClaim` | Use your own data Persistent Volume existing claim name | `unset` | +| `log.persistence.persistentVolumeClaim.storageClass` | The Milvus logs data Persistent Volume Storage Class | `unset` | +| `log.persistence.persistentVolumeClaim.accessModes` | The Milvus logs data Persistence access modes | `ReadWriteOnce` | +| `log.persistence.persistentVolumeClaim.size` | The size of Milvus logs data Persistent Volume Storage Class | `5Gi` | +| `log.persistence.persistentVolumeClaim.subPath` | SubPath for Milvus logs data mount | `unset` | +| `externalS3.enabled` | Enable or disable external S3 | `false` | +| `externalS3.host` | The host of the external S3 | `unset` | +| `externalS3.port` | The port of the external S3 | `unset` | +| `externalS3.rootPath` | The path prefix of the external S3 | `unset` | +| `externalS3.accessKey` | The Access Key of the external S3 | `unset` | +| `externalS3.secretKey` | The Secret Key of the external S3 | `unset` | +| `externalS3.bucketName` | The Bucket Name of the external S3 | `unset` | +| `externalS3.useSSL` | If true, use SSL to connect to the external S3 | `false` | +| `externalS3.useIAM` | If true, use iam to connect to the external S3 | `false` | +| `externalS3.cloudProvider` | When `useIAM` enabled, only "aws" & "gcp" is supported for now | `aws` | +| `externalS3.iamEndpoint` | The IAM endpoint of the external S3 | `` | +| `externalS3.region` | The region of the external S3 | `` | +| `externalS3.useVirtualHost` | If true, the external S3 whether use virtual host bucket mode | `` | +| `externalEtcd.enabled` | Enable or disable external Etcd | `false` | +| `externalEtcd.endpoints` | The endpoints of the external etcd | `{}` | +| `externalPulsar.enabled` | Enable or disable external Pulsar | `false` | +| `externalPulsar.host` | The host of the external Pulsar | `localhost` | +| `externalPulsar.port` | The port of the external Pulsar | `6650` | +| `externalPulsar.tenant` | The tenant of the external Pulsar | `public` | +| `externalPulsar.namespace` | The namespace of the external Pulsar | `default` | +| `externalPulsar.authPlugin` | The authPlugin of the external Pulsar | `""` | +| `externalPulsar.authParams` | The authParams of the external Pulsar | `""` | +| `externalKafka.enabled` | Enable or disable external Kafka | `false` | +| `externalKafka.brokerList` | The brokerList of the external Kafka separated by comma | `localhost:9092` | +| `externalKafka.securityProtocol` | The securityProtocol used for kafka authentication | `SASL_SSL` | +| `externalKafka.sasl.mechanisms` | SASL mechanism to use for kafka authentication | `PLAIN` | +| `externalKafka.sasl.username` | 
username for PLAIN or SASL/PLAIN authentication | `` | +| `externalKafka.sasl.password` | password for PLAIN or SASL/PLAIN authentication | `` | + +### Milvus Standalone Deployment Configuration + +The following table lists the configurable parameters of the Milvus Standalone component and their default values. + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `standalone.resources` | Resource requests/limits for the Milvus Standalone pods | `{}` | +| `standalone.nodeSelector` | Node labels for Milvus Standalone pods assignment | `{}` | +| `standalone.affinity` | Affinity settings for Milvus Standalone pods assignment | `{}` | +| `standalone.tolerations` | Toleration labels for Milvus Standalone pods assignment | `[]` | +| `standalone.heaptrack.enabled` | Whether to enable heaptrack | `false` | +| `standalone.disk.enabled` | Whether to enable disk | `true` | +| `standalone.profiling.enabled` | Whether to enable live profiling | `false` | +| `standalone.extraEnv` | Additional Milvus Standalone container environment variables | `[]` | +| `standalone.messageQueue` | Message queue for Milvus Standalone: rocksmq, natsmq, pulsar, kafka | `rocksmq` | +| `standalone.persistence.enabled` | Use persistent volume to store Milvus standalone data | `true` | +| `standalone.persistence.mountPath` | Milvus standalone data persistence volume mount path | `/var/lib/milvus` | +| `standalone.persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `standalone.persistence.persistentVolumeClaim.existingClaim` | Use your own data Persistent Volume existing claim name | `unset` | +| `standalone.persistence.persistentVolumeClaim.storageClass` | The Milvus standalone data Persistent Volume Storage Class | `unset` | +| `standalone.persistence.persistentVolumeClaim.accessModes` | The Milvus standalone data Persistence access modes | `ReadWriteOnce` | +| `standalone.persistence.persistentVolumeClaim.size` | The size of Milvus standalone data Persistent Volume Storage Class | `5Gi` | +| `standalone.persistence.persistentVolumeClaim.subPath` | SubPath for Milvus standalone data mount | `unset` | + +### Milvus Proxy Deployment Configuration + +The following table lists the configurable parameters of the Milvus Proxy component and their default values. 
+ +| Parameter | Description | Default | +|-------------------------------------------|---------------------------------------------------------|---------------| +| `proxy.enabled` | Enable or disable Milvus Proxy Deployment | `true` | +| `proxy.replicas` | Desired number of Milvus Proxy pods | `1` | +| `proxy.resources` | Resource requests/limits for the Milvus Proxy pods | `{}` | +| `proxy.nodeSelector` | Node labels for Milvus Proxy pods assignment | `{}` | +| `proxy.affinity` | Affinity settings for Milvus Proxy pods assignment | `{}` | +| `proxy.tolerations` | Toleration labels for Milvus Proxy pods assignment | `[]` | +| `proxy.heaptrack.enabled` | Whether to enable heaptrack | `false` | +| `proxy.profiling.enabled` | Whether to enable live profiling | `false` | +| `proxy.extraEnv` | Additional Milvus Proxy container environment variables | `[]` | +| `proxy.http.enabled` | Enable rest api for Milvus Proxy | `true` | +| `proxy.maxUserNum` | Modify the Milvus maximum user limit | `100` | +| `proxy.maxRoleNum` | Modify the Milvus maximum role limit | `10` | +| `proxy.http.debugMode.enabled` | Enable debug mode for rest api | `false` | +| `proxy.tls.enabled` | Enable porxy tls connection | `false` | +| `proxy.strategy` | Deployment strategy configuration | RollingUpdate | +| `proxy.annotations` | Additional pod annotations | `{}` | +| `proxy.hpa` | Enable hpa for proxy node | false | +| `proxy.minReplicas` | Specify the minimum number of replicas | 1 | +| `proxy.maxReplicas` | Specify the maximum number of replicas | 5 | +| `proxy.cpuUtilization` | Specify the cpu auto-scaling value | 40 | + + + +### Milvus Query Node Deployment Configuration + +The following table lists the configurable parameters of the Milvus Query Node component and their default values. + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `queryNode.enabled` | Enable or disable Milvus Query Node component | `true` | +| `queryNode.replicas` | Desired number of Milvus Query Node pods | `1` | +| `queryNode.resources` | Resource requests/limits for the Milvus Query Node pods | `{}` | +| `queryNode.nodeSelector` | Node labels for Milvus Query Node pods assignment | `{}` | +| `queryNode.affinity` | Affinity settings for Milvus Query Node pods assignment | `{}` | +| `queryNode.tolerations` | Toleration labels for Milvus Query Node pods assignment | `[]` | +| `queryNode.heaptrack.enabled` | Whether to enable heaptrack | `false` | +| `queryNode.disk.enabled` | Whether to enable disk for query | `true` | +| `queryNode.profiling.enabled` | Whether to enable live profiling | `false` | +| `queryNode.extraEnv` | Additional Milvus Query Node container environment variables | `[]` | +| `queryNode.strategy` | Deployment strategy configuration | RollingUpdate | +| `queryNode.annotations` | Additional pod annotations | `{}` | +| `queryNode.hpa` | Enable hpa for query node | false | +| `queryNode.minReplicas` | Specify the minimum number of replicas | 1 | +| `queryNode.maxReplicas` | Specify the maximum number of replicas | 5 | +| `queryNode.cpuUtilization` | Specify the cpu auto-scaling value | 40 | +| `queryNode.memoryUtilization` | Specify the memory auto-scaling value | 60 | + + + + + +### Milvus Data Node Deployment Configuration + +The following table lists the configurable parameters of the Milvus Data Node component and their default values. 
+ +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `dataNode.enabled` | Enable or disable Data Node component | `true` | +| `dataNode.replicas` | Desired number of Data Node pods | `1` | +| `dataNode.resources` | Resource requests/limits for the Milvus Data Node pods | `{}` | +| `dataNode.nodeSelector` | Node labels for Milvus Data Node pods assignment | `{}` | +| `dataNode.affinity` | Affinity settings for Milvus Data Node pods assignment | `{}` | +| `dataNode.tolerations` | Toleration labels for Milvus Data Node pods assignment | `[]` | +| `dataNode.heaptrack.enabled` | Whether to enable heaptrack | `false` | +| `dataNode.profiling.enabled` | Whether to enable live profiling | `false` | +| `dataNode.extraEnv` | Additional Milvus Data Node container environment variables | `[]` | +| `dataNode.strategy` | Deployment strategy configuration | RollingUpdate | +| `dataNode.annotations` | Additional pod annotations | `{}` | +| `dataNode.hpa` | Enable hpa for data node | false | +| `dataNode.minReplicas` | Specify the minimum number of replicas | 1 | +| `dataNode.maxReplicas` | Specify the maximum number of replicas | 5 | +| `dataNode.cpuUtilization` | Specify the cpu auto-scaling value | 40 | + +### Milvus Mix Coordinator Deployment Configuration + +The following table lists the configurable parameters of the Milvus Mix Coordinator component and their default values. The Mix Coordinator consolidates all coordinator functions (Root, Query, Index, and Data Coordinators) into a single component for improved efficiency and simplified deployment. + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `mixCoordinator.enabled` | Enable or disable Mix Coordinator component | `true` | +| `mixCoordinator.replicas` | Desired number of Mix Coordinator pods | `1` | +| `mixCoordinator.resources` | Resource requests/limits for the Milvus Mix Coordinator pods | `{}` | +| `mixCoordinator.nodeSelector` | Node labels for Milvus Mix Coordinator pods assignment | `{}` | +| `mixCoordinator.affinity` | Affinity settings for Milvus Mix Coordinator pods assignment | `{}` | +| `mixCoordinator.tolerations` | Toleration labels for Milvus Mix Coordinator pods assignment | `[]` | +| `mixCoordinator.heaptrack.enabled` | Whether to enable heaptrack | `false` | +| `mixCoordinator.profiling.enabled` | Whether to enable live profiling | `false` | +| `mixCoordinator.activeStandby.enabled` | Whether to enable active-standby | `false` | +| `mixCoordinator.extraEnv` | Additional Milvus Mix Coordinator container environment variables | `[]` | +| `mixCoordinator.service.type` | Service type | `ClusterIP` | +| `mixCoordinator.service.annotations` | Service annotations | `{}` | +| `mixCoordinator.service.labels` | Service custom labels | `{}` | +| `mixCoordinator.service.clusterIP` | Internal cluster service IP | `unset` | +| `mixCoordinator.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `unset` | +| `mixCoordinator.service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to lb (if supported) | `[]` | +| `mixCoordinator.service.externalIPs` | Service external IP addresses | `[]` | +| `mixCoordinator.strategy` | Deployment strategy configuration | RollingUpdate | +| `mixCoordinator.annotations` | Additional pod 
annotations | `{}` | + +### Milvus Streaming Node Deployment Configuration + +The following table lists the configurable parameters of the Milvus Streaming Node component and their default values. The Streaming Node is a new component introduced in Milvus v2.6.x for enhanced data processing and streaming operations. + +| Parameter | Description | Default | +|-------------------------------------------|---------------------------------------------------------|---------------| +| `streamingNode.enabled` | Enable or disable Streaming Node component | `true` | +| `streamingNode.replicas` | Desired number of Streaming Node pods | `1` | +| `streamingNode.resources` | Resource requests/limits for the Milvus Streaming Node pods | `{}` | +| `streamingNode.nodeSelector` | Node labels for Milvus Streaming Node pods assignment | `{}` | +| `streamingNode.affinity` | Affinity settings for Milvus Streaming Node pods assignment | `{}` | +| `streamingNode.tolerations` | Toleration labels for Milvus Streaming Node pods assignment | `[]` | +| `streamingNode.securityContext` | Security context for Streaming Node pods | `{}` | +| `streamingNode.containerSecurityContext` | Container security context for Streaming Node | `{}` | +| `streamingNode.heaptrack.enabled` | Whether to enable heaptrack | `false` | +| `streamingNode.profiling.enabled` | Whether to enable live profiling | `false` | +| `streamingNode.extraEnv` | Additional Milvus Streaming Node container environment variables | `[]` | +| `streamingNode.strategy` | Deployment strategy configuration | `{}` | + + +### TEI Configuration + +The following table lists the configurable parameters of the Text Embeddings Inference (TEI) component and their default values. + +| Parameter | Description | Default | +|-------------------------------------------|---------------------------------------------------------|---------------------------------------------| +| `tei.enabled` | Enable or disable TEI deployment | `false` | +| `tei.name` | Name of the TEI component | `text-embeddings-inference` | +| `tei.image.repository` | TEI image repository | `ghcr.io/huggingface/text-embeddings-inference` | +| `tei.image.tag` | TEI image tag | `cpu-1.6` | +| `tei.image.pullPolicy` | TEI image pull policy | `IfNotPresent` | +| `tei.service.type` | TEI service type | `ClusterIP` | +| `tei.service.port` | TEI service port | `8080` | +| `tei.service.annotations` | TEI service annotations | `{}` | +| `tei.service.labels` | TEI service custom labels | `{}` | +| `tei.resources.requests.cpu` | CPU resource requests for TEI pods | `4` | +| `tei.resources.requests.memory` | Memory resource requests for TEI pods | `8Gi` | +| `tei.resources.limits.cpu` | CPU resource limits for TEI pods | `8` | +| `tei.resources.limits.memory` | Memory resource limits for TEI pods | `16Gi` | +| `tei.persistence.enabled` | Enable persistence for TEI | `true` | +| `tei.persistence.mountPath` | Mount path for TEI persistence | `/data` | +| `tei.persistence.annotations` | Annotations for TEI PVC | `{}` | +| `tei.persistence.persistentVolumeClaim.existingClaim` | Existing PVC for TEI | `""` | +| `tei.persistence.persistentVolumeClaim.storageClass` | Storage class for TEI PVC | `""` | +| `tei.persistence.persistentVolumeClaim.accessModes` | Access modes for TEI PVC | `ReadWriteOnce` | +| `tei.persistence.persistentVolumeClaim.size` | Size of TEI PVC | `50Gi` | +| `tei.persistence.persistentVolumeClaim.subPath` | SubPath for TEI PVC | `""` | +| `tei.modelId` | Model ID for TEI | `BAAI/bge-large-en-v1.5` | +| 
`tei.extraArgs` | Additional arguments for TEI | `[]` | +| `tei.nodeSelector` | Node labels for TEI pods assignment | `{}` | +| `tei.affinity` | Affinity settings for TEI pods assignment | `{}` | +| `tei.tolerations` | Toleration labels for TEI pods assignment | `[]` | +| `tei.topologySpreadConstraints` | Topology spread constraints for TEI pods | `[]` | +| `tei.extraEnv` | Additional TEI container environment variables | `[]` | +| `tei.replicas` | Number of TEI replicas | `1` | + + +### Pulsar Configuration + +This version of the chart includes the dependent Pulsar chart in the charts/ directory. +- `pulsar-v3.3.0` is used for Pulsar v3 +- `pulsar-v2.7.8` is used for Pulsar v2 + +Since milvus chart version 4.2.21, pulsar v3 is supported, but pulsar v2 will be still used by default until the release of Milvus v2.5.0. + +We recommend creating new instances with pulsar v3 to avoid security vulnerabilities & some bugs in pulsar v2. To use pulsar v3, set `pulsarv3.enabled` to `true` and `pulsar.enabled` to `false`. Set other values for pulsar v3 under `pulsarv3` field. + +You can find more information at: +* [https://pulsar.apache.org/charts](https://pulsar.apache.org/charts) + +### Etcd Configuration + +This version of the chart includes the dependent Etcd chart in the charts/ directory. + +You can find more information at: +* [https://artifacthub.io/packages/helm/bitnami/etcd](https://artifacthub.io/packages/helm/bitnami/etcd) + +### Minio Configuration + +This version of the chart includes the dependent Minio chart in the charts/ directory. + +You can find more information at: +* [https://github.com/minio/charts/blob/master/README.md](https://github.com/minio/charts/blob/master/README.md) + +### Kafka Configuration + +This version of the chart includes the dependent Kafka chart in the charts/ directory. + +You can find more information at: +* [https://artifacthub.io/packages/helm/bitnami/kafka](https://artifacthub.io/packages/helm/bitnami/kafka) + + +### Node Selector, Affinity and Tolerations Configuration Guide + +- [Node Selector Configuration Guide](../../docs/node-selector-configuration-guide.md) +- [Affinity Configuration Guide](../../docs/affinity-configuration-guide.md) +- [Tolerations Configuration Guide](../../docs/tolerations-configuration-guide.md) + +#### Important Configuration Considerations + +When configuring pod scheduling in Kubernetes, be aware of potential conflicts between different scheduling mechanisms: + +1. **Node Selectors vs Node Affinity** + - Node selectors provide a simple way to constrain pods to nodes with specific labels + - Node affinity provides more flexible pod scheduling rules + - When used together, **both** node selector and node affinity rules must be satisfied + - Consider using only one mechanism unless you have specific requirements + +2. **Scheduling Priority** + - Node Selectors: Hard requirement that must be satisfied + - Required Node Affinity: Hard requirement that must be satisfied + - Preferred Node Affinity: Soft preference that Kubernetes will try to satisfy + - Pod Anti-Affinity: Can prevent pods from being scheduled on the same node + +3. **Best Practices** + - Start with simple node selectors for basic constraints + - Use node/pod affinity for more complex scheduling requirements + - Avoid combining multiple scheduling constraints unless necessary + - Test your configuration in a non-production environment first + +For detailed examples and configurations, please refer to the documentation guides linked above. 
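+
+As a hedged illustration of the scheduling mechanisms discussed above, the per-component values listed in the tables earlier (for example `queryNode.nodeSelector`, `queryNode.tolerations`, and `dataNode.affinity`) can be combined in a values.yaml; the node label and taint names below are placeholders, not chart defaults:
+
+```yaml
+# Sketch only: label/taint names are hypothetical examples
+queryNode:
+  nodeSelector:
+    node-role/milvus: "query"          # pin query nodes to nodes carrying this label
+  tolerations:
+    - key: "dedicated"                 # allow scheduling onto tainted, dedicated nodes
+      operator: "Equal"
+      value: "milvus"
+      effect: "NoSchedule"
+dataNode:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: "node-role/milvus"
+                operator: In
+                values: ["data"]
+```
+
+Keeping to a single mechanism per component (either the node selector or the affinity block) avoids the combined hard constraints described above.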
+ +### Milvus Live Profiling +Profiling is an effective way of understanding which parts of your application are consuming the most resources. + +Continuous Profiling adds a dimension of time that allows you to understand your systems resource usage (i.e. CPU, Memory, etc.) over time and gives you the ability to locate, debug, and fix issues related to performance. + +You can enable profiling with Pyroscope and you can find more information at: +* [https://pyroscope.io/docs/kubernetes-helm-chart/](https://pyroscope.io/docs/kubernetes-helm-chart/) + +## Text Embeddings Inference (TEI) Integration Guide for Milvus + +Text Embeddings Inference (TEI) is a high-performance text embedding model inference service that converts text into vector representations. Milvus is a vector database that can store and retrieve these vectors. By combining the two, you can build powerful semantic search and retrieval systems. + +TEI is an open-source project developed by Hugging Face, available at [https://github.com/huggingface/text-embeddings-inference](https://github.com/huggingface/text-embeddings-inference). + +This guide provides two ways to use TEI: +1. Deploy TEI service directly through the Milvus Helm Chart +2. Use external TEI service with Milvus integration + +For detailed configuration and usage instructions, please refer to the [README-TEI.md](./README-TEI.md) document. diff --git a/deployment/helm/milvus/charts/etcd/.helmignore b/deployment/helm/milvus/charts/etcd/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/helm/milvus/charts/etcd/Chart.lock b/deployment/helm/milvus/charts/etcd/Chart.lock new file mode 100644 index 000000000..e70efc4c0 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.4.0 +digest: sha256:8c1a5dc923412d11d4d841420494b499cb707305c8b9f87f45ea1a8bf3172cb3 +generated: "2023-05-21T14:12:59.250402885Z" diff --git a/deployment/helm/milvus/charts/etcd/Chart.yaml b/deployment/helm/milvus/charts/etcd/Chart.yaml new file mode 100644 index 000000000..fc632733d --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 3.5.9 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: etcd is a distributed key-value store designed to securely store data + across a cluster. etcd is widely used in production on account of its reliability, + fault-tolerance and ease of use. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/etcd/img/etcd-stack-220x234.png +keywords: +- etcd +- cluster +- database +- cache +- key-value +maintainers: +- name: VMware, Inc. 
+ url: https://github.com/bitnami/charts +name: etcd +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/etcd +version: 8.12.0 diff --git a/deployment/helm/milvus/charts/etcd/README.md b/deployment/helm/milvus/charts/etcd/README.md new file mode 100644 index 000000000..93d4a12c2 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/README.md @@ -0,0 +1,537 @@ + + +# Etcd packaged by Bitnami + +etcd is a distributed key-value store designed to securely store data across a cluster. etcd is widely used in production on account of its reliability, fault-tolerance and ease of use. + +[Overview of Etcd](https://etcd.io/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/etcd +``` + +## Introduction + +This chart bootstraps a [etcd](https://github.com/bitnami/containers/tree/main/bitnami/etcd) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/etcd +``` + +These commands deploy etcd on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
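+
+Before the full parameter reference below, here is a minimal, illustrative values.yaml that combines a few commonly tuned parameters (`replicaCount`, `auth.rbac.*`, and `persistence.size`); the password is a placeholder and should be replaced:
+
+```yaml
+# Sketch only: pass with `helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/etcd`
+replicaCount: 3
+auth:
+  rbac:
+    create: true
+    rootPassword: "change-me"   # the root user is always `root`
+persistence:
+  enabled: true
+  size: 8Gi
+```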
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### etcd parameters + +| Name | Description | Value | +| -------------------------------------- | ----------------------------------------------------------------------------------------------------------- | -------------------- | +| `image.registry` | etcd image registry | `docker.io` | +| `image.repository` | etcd image name | `bitnami/etcd` | +| `image.tag` | etcd image tag | `3.5.9-debian-11-r4` | +| `image.digest` | etcd image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | etcd image pull policy | `IfNotPresent` | +| `image.pullSecrets` | etcd image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | +| `auth.rbac.create` | Switch to enable RBAC authentication | `true` | +| `auth.rbac.allowNoneAuthentication` | Allow to use etcd without configuring RBAC authentication | `true` | +| `auth.rbac.rootPassword` | Root user password. The root user is always `root` | `""` | +| `auth.rbac.existingSecret` | Name of the existing secret containing credentials for the root user | `""` | +| `auth.rbac.existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `""` | +| `auth.token.enabled` | Enables token authentication | `true` | +| `auth.token.type` | Authentication token type. 
Allowed values: 'simple' or 'jwt' | `jwt` | +| `auth.token.privateKey.filename` | Name of the file containing the private key for signing the JWT token | `jwt-token.pem` | +| `auth.token.privateKey.existingSecret` | Name of the existing secret containing the private key for signing the JWT token | `""` | +| `auth.token.signMethod` | JWT token sign method | `RS256` | +| `auth.token.ttl` | JWT token TTL | `10m` | +| `auth.client.secureTransport` | Switch to encrypt client-to-server communications using TLS certificates | `false` | +| `auth.client.useAutoTLS` | Switch to automatically create the TLS certificates | `false` | +| `auth.client.existingSecret` | Name of the existing secret containing the TLS certificates for client-to-server communications | `""` | +| `auth.client.enableAuthentication` | Switch to enable host authentication using TLS certificates. Requires existing secret | `false` | +| `auth.client.certFilename` | Name of the file containing the client certificate | `cert.pem` | +| `auth.client.certKeyFilename` | Name of the file containing the client certificate private key | `key.pem` | +| `auth.client.caFilename` | Name of the file containing the client CA certificate | `""` | +| `auth.peer.secureTransport` | Switch to encrypt server-to-server communications using TLS certificates | `false` | +| `auth.peer.useAutoTLS` | Switch to automatically create the TLS certificates | `false` | +| `auth.peer.existingSecret` | Name of the existing secret containing the TLS certificates for server-to-server communications | `""` | +| `auth.peer.enableAuthentication` | Switch to enable host authentication using TLS certificates. Requires existing secret | `false` | +| `auth.peer.certFilename` | Name of the file containing the peer certificate | `cert.pem` | +| `auth.peer.certKeyFilename` | Name of the file containing the peer certificate private key | `key.pem` | +| `auth.peer.caFilename` | Name of the file containing the peer CA certificate | `""` | +| `autoCompactionMode` | Auto compaction mode, by default periodic. Valid values: "periodic", "revision". | `""` | +| `autoCompactionRetention` | Auto compaction retention for mvcc key value store in hour, by default 0, means disabled | `""` | +| `initialClusterState` | Initial cluster state. Allowed values: 'new' or 'existing' | `""` | +| `logLevel` | Sets the log level for the etcd process. Allowed values: 'debug', 'info', 'warn', 'error', 'panic', 'fatal' | `info` | +| `maxProcs` | Limits the number of operating system threads that can execute user-level | `""` | +| `removeMemberOnContainerTermination` | Use a PreStop hook to remove the etcd members from the etcd cluster on container termination | `true` | +| `configuration` | etcd configuration. 
Specify content for etcd.conf.yml | `""` | +| `existingConfigmap` | Existing ConfigMap with etcd configuration | `""` | +| `extraEnvVars` | Extra environment variables to be set on etcd container | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars | `""` | +| `command` | Default container command (useful when using custom images) | `[]` | +| `args` | Default container args (useful when using custom images) | `[]` | + +### etcd statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | ----------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of etcd replicas to deploy | `1` | +| `updateStrategy.type` | Update strategy type, can be set to RollingUpdate or OnDelete. | `RollingUpdate` | +| `podManagementPolicy` | Pod management policy for the etcd statefulset | `Parallel` | +| `hostAliases` | etcd pod host aliases | `[]` | +| `lifecycleHooks` | Override default etcd container hooks | `{}` | +| `containerPorts.client` | Client port to expose at container level | `2379` | +| `containerPorts.peer` | Peer port to expose at container level | `2380` | +| `podSecurityContext.enabled` | Enabled etcd pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set etcd pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled etcd containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set etcd container's Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set etcd container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as nonprivilege | `false` | +| `resources.limits` | The resources limits for the etcd container | `{}` | +| `resources.requests` | The requested resources for the etcd container | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `60` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| 
`customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for etcd pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for etcd container(s) | `[]` | +| `initContainers` | Add additional init containers to the etcd pods | `[]` | +| `sidecars` | Add additional sidecar containers to the etcd pods | `[]` | +| `podAnnotations` | Annotations for etcd pods | `{}` | +| `podLabels` | Extra labels for etcd pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `priorityClassName` | Name of the priority class to be used by etcd pods | `""` | +| `runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `shareProcessNamespace` | Enable shared process namespace in a pod. | `false` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` | +| `persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | + +### Traffic exposure parameters + +| Name | Description | Value | +| ---------------------------------- | ---------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.enabled` | create second service if equal true | `true` | +| `service.clusterIP` | Kubernetes service Cluster IP | `""` | +| `service.ports.client` | etcd client port | `2379` | +| `service.ports.peer` | etcd peer port | `2380` | +| `service.nodePorts.client` | Specify the nodePort client value for the LoadBalancer and NodePort service types. | `""` | +| `service.nodePorts.peer` | Specify the nodePort peer value for the LoadBalancer and NodePort service types. 
| `""` | +| `service.clientPortNameOverride` | etcd client port name override | `""` | +| `service.peerPortNameOverride` | etcd peer port name override | `""` | +| `service.loadBalancerIP` | loadBalancerIP for the etcd service (optional, cloud specific) | `""` | +| `service.loadBalancerSourceRanges` | Load Balancer source ranges | `[]` | +| `service.externalIPs` | External IPs | `[]` | +| `service.externalTrafficPolicy` | %%MAIN_CONTAINER_NAME%% service external traffic policy | `Cluster` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.annotations` | Additional annotations for the etcd service | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | + +### Persistence parameters + +| Name | Description | Value | +| -------------------------- | --------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | If true, use a Persistent Volume Claim. If false, use emptyDir. | `true` | +| `persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for etcd data volume | `8Gi` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r118` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + +### Network Policy parameters + +| Name | Description | Value | +| --------------------------------------- | ---------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ------------ | +| `metrics.enabled` | Expose etcd metrics | `false` | +| `metrics.podAnnotations` | Annotations for the Prometheus metrics on etcd pods | `{}` | +| `metrics.podMonitor.enabled` | Create PodMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.podMonitor.namespace` | Namespace in which Prometheus is running | `monitoring` | +| `metrics.podMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.podMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `30s` | +| `metrics.podMonitor.additionalLabels` | Additional labels that can be used so PodMonitors will be discovered by Prometheus | `{}` | +| `metrics.podMonitor.scheme` | Scheme to use for scraping | `http` | +| `metrics.podMonitor.tlsConfig` | TLS configuration used for scrape endpoints used by Prometheus | `{}` | +| `metrics.podMonitor.relabelings` | Prometheus relabeling rules | `[]` | +| `metrics.prometheusRule.enabled` | Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` | + +### Snapshotting parameters + +| Name | Description | Value | +| ----------------------------------------------- | ----------------------------------------------------------------------- | -------------- | +| `startFromSnapshot.enabled` | Initialize new cluster recovering an existing snapshot | `false` | +| `startFromSnapshot.existingClaim` | Existing PVC containing the etcd snapshot | `""` | +| `startFromSnapshot.snapshotFilename` | Snapshot filename | `""` | +| `disasterRecovery.enabled` | Enable auto disaster recovery by periodically snapshotting the keyspace | `false` | +| 
`disasterRecovery.cronjob.schedule` | Schedule in Cron format to save snapshots | `*/30 * * * *` | +| `disasterRecovery.cronjob.historyLimit` | Number of successful finished jobs to retain | `1` | +| `disasterRecovery.cronjob.snapshotHistoryLimit` | Number of etcd snapshots to retain, tagged by date | `1` | +| `disasterRecovery.cronjob.snapshotsDir` | Directory to store snapshots | `/snapshots` | +| `disasterRecovery.cronjob.podAnnotations` | Pod annotations for cronjob pods | `{}` | +| `disasterRecovery.cronjob.resources.limits` | Cronjob container resource limits | `{}` | +| `disasterRecovery.cronjob.resources.requests` | Cronjob container resource requests | `{}` | +| `disasterRecovery.cronjob.nodeSelector` | Node labels for cronjob pods assignment | `{}` | +| `disasterRecovery.cronjob.tolerations` | Tolerations for cronjob pods assignment | `[]` | +| `disasterRecovery.pvc.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `disasterRecovery.pvc.size` | PVC Storage Request | `2Gi` | +| `disasterRecovery.pvc.storageClassName` | Storage Class for snapshots volume | `nfs` | +| `disasterRecovery.pvc.subPath` | Path within the volume from which to mount | `""` | + +### Service account parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable/disable service account creation | `false` | +| `serviceAccount.name` | Name of the service account to create or use | `""` | +| `serviceAccount.automountServiceAccountToken` | Enable/disable auto mounting of service account token | `true` | +| `serviceAccount.annotations` | Additional annotations to be included on the service account | `{}` | +| `serviceAccount.labels` | Additional labels to be included on the service account | `{}` | + +### Other parameters + +| Name | Description | Value | +| -------------------- | -------------------------------------------------------------- | ------ | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `51%` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.rbac.rootPassword=secretpassword oci://registry-1.docker.io/bitnamicharts/etcd +``` + +The above command sets the etcd `root` account password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/etcd +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. 
This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Cluster configuration + +The Bitnami etcd chart can be used to bootstrap an etcd cluster, easy to scale and with available features to implement disaster recovery. + +Refer to the [chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/etcd/get-started/understand-default-configuration/) for more information about all these details. + +### Enable security for etcd + +The etcd chart can be configured with Role-based access control and TLS encryption to improve its security. + +[Learn more about security in the chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/etcd/administration/enable-security/). + +### Persistence + +The [Bitnami etcd](https://github.com/bitnami/containers/tree/main/bitnami/etcd) image stores the etcd data at the `/bitnami/etcd` path of the container. Persistent Volume Claims are used to keep the data across statefulsets. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +### Backup and restore the etcd keyspace + +The Bitnami etcd chart provides mechanisms to bootstrap the etcd cluster restoring an existing snapshot before initializing. + +[Learn more about backup/restore features in the chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/etcd/administration/backup-restore/). + +### Exposing etcd metrics + +The metrics exposed by etcd can be exposed to be scraped by Prometheus. This can be done by adding the required annotations for Prometheus to discover the metrics endpoints or creating a PodMonitor entry if you are using the Prometheus Operator. + +[Learn more about exposing metrics in the chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/etcd/administration/enable-metrics/). + +### Using custom configuration + +In order to use custom configuration parameters, two options are available: + +- Using environment variables: etcd allows setting environment variables that map to configuration settings. In order to set extra environment variables, you can use the `extraEnvVars` property. Alternatively, you can use a ConfigMap or a Secret with the environment variables using the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +```yaml +extraEnvVars: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: "0" + - name: ETCD_HEARTBEAT_INTERVAL + value: "150" +``` + +- Using a custom `etcd.conf.yml`: The etcd chart allows mounting a custom `etcd.conf.yml` file as ConfigMap. In order to so, you can use the `configuration` property. Alternatively, you can use an existing ConfigMap using the `existingConfigmap` parameter. + +### Auto Compaction + +Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. 
Compacting the keyspace history drops all information about keys superseded prior to a given keyspace revision. The space used by these keys then becomes available for additional writes to the keyspace. + +`autoCompactionMode`, by default periodic. Valid values: "periodic", "revision". + +- 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. "5m"). +- 'revision' for revision number based retention. +`autoCompactionRetention` for mvcc key value store in hour, by default 0, means disabled. + +You can enable auto compaction by using following parameters: + +```console +autoCompactionMode=periodic +autoCompactionRetention=10m +``` + +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as the etcd app (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such a ConfigMap containing your app's configuration or some extra deployment with a micro service used by your app. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 8.0.0 + +This version reverts the change in the previous major bump ([7.0.0](https://github.com/bitnami/charts/tree/main/bitnami/etcd#to-700)). Now the default `etcd` branch is `3.5` again once confirmed by the [etcd developers](https://github.com/etcd-io/etcd/tree/main/CHANGELOG#production-recommendation) that this version is production-ready once solved the data corruption issue. + +### To 7.0.0 + +This version changes the default `etcd` branch to `3.4` as suggested by [etcd developers](https://github.com/etcd-io/etcd/tree/main/CHANGELOG#production-recommendation). In order to migrate the data follow the official etcd instructions. + +### To 6.0.0 + +This version introduces several features and performance improvements: + +- The statefulset can now be scaled using `kubectl scale` command. Using `helm upgrade` to recalculate available endpoints is no longer needed. 
+- The scripts used for bootstrapping, runtime reconfiguration, and disaster recovery have been refactored and moved to the etcd container with two purposes: removing technical debt & improving the stability. +- Several parameters were reorganized to simplify the structure and follow the same standard used on other Bitnami charts: + - `etcd.initialClusterState` is renamed to `initialClusterState`. + - `statefulset.replicaCount` is renamed to `replicaCount`. + - `statefulset.podManagementPolicy` is renamed to `podManagementPolicy`. + - `statefulset.updateStrategy` and `statefulset.rollingUpdatePartition` are merged into `updateStrategy`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - `configFileConfigMap` is deprecated in favor of `configuration` and `existingConfigmap`. + - `envVarsConfigMap` is deprecated in favor of `extraEnvVars`, `extraEnvVarsCM` and `extraEnvVarsSecret`. + - `allowNoneAuthentication` is renamed to `auth.rbac.allowNoneAuthentication`. +- New parameters/features were added: + - `extraDeploy` to deploy any extra desired object. + - `initContainers` and `sidecars` to define custom init containers and sidecars. + - `extraVolumes` and `extraVolumeMounts` to define custom volumes and mount points. + - Probes can be now customized, and support to startup probes is added. + - LifecycleHooks can be customized using `lifecycleHooks` parameter. + - The default command/args can be customized using `command` and `args` parameters. +- Metrics integration with Prometheus Operator does no longer use a ServiceMonitor object, but a PodMonitor instead. + +Consequences: + +- Backwards compatibility is not guaranteed unless you adapt you **values.yaml** according to the changes described above. + +### To 5.2.0 + +This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 5.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/etcd/administration/upgrade-helm3/). + +### To 4.4.14 + +In this release we addressed a vulnerability that showed the `ETCD_ROOT_PASSWORD` environment variable in the application logs. Users are advised to update immediately. More information in [this issue](https://github.com/bitnami/charts/issues/1901). + +### To 3.0.0 + +Backwards compatibility is not guaranteed. The following notables changes were included: + +- **etcdctl** uses v3 API. +- Adds support for auto disaster recovery. +- Labels are adapted to follow the Helm charts best practices. + +To upgrade from previous charts versions, create a snapshot of the keyspace and restore it in a new etcd cluster. Only v3 API data can be restored. 
+You can use the command below to upgrade your chart by starting a new cluster using an existing snapshot, available in an existing PVC, to initialize the members: + +```console +helm install new-release oci://registry-1.docker.io/bitnamicharts/etcd \ + --set statefulset.replicaCount=3 \ + --set persistence.enabled=true \ + --set persistence.size=8Gi \ + --set startFromSnapshot.enabled=true \ + --set startFromSnapshot.existingClaim=my-claim \ + --set startFromSnapshot.snapshotFilename=my-snapshot.db +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is etcd: + +```console +kubectl delete statefulset etcd --cascade=false +``` + +## License + +Copyright © 2023 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/deployment/helm/milvus/charts/etcd/charts/common/.helmignore b/deployment/helm/milvus/charts/etcd/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/milvus/charts/etcd/charts/common/Chart.yaml b/deployment/helm/milvus/charts/etcd/charts/common/Chart.yaml new file mode 100644 index 000000000..4fc56bbb7 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.4.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.4.0 diff --git a/deployment/helm/milvus/charts/etcd/charts/common/README.md b/deployment/helm/milvus/charts/etcd/charts/common/README.md new file mode 100644 index 000000000..72fca33da --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. + +Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. 
+ +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_affinities.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..81902a681 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ 
include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_capabilities.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..697486a31 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_capabilities.tpl @@ -0,0 +1,180 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_errors.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..a79cc2e32 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_images.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_images.tpl new file mode 100644 index 000000000..d60c22e25 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_images.tpl @@ -0,0 +1,80 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_ingress.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..831da9caa --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_labels.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_names.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_names.tpl new file mode 100644 index 000000000..617a23489 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_names.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_secrets.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a1708b2e8 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_storage.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_tplvalues.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_utils.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..b1ead50cf --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/_warnings.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_cassandra.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mariadb.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mongodb.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..f820ec107 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mysql.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..74472a061 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_postgresql.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
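+(Illustrative note, not from the upstream chart: the "*.values.enabled" helpers in this
+library render the flag through printf "%v", so they always return the string "true" or
+"false". Callers such as "common.validations.values.postgresql.passwords" above therefore
+compare the rendered result with eq rather than using it as a boolean, along these lines:
+  {{- if eq (include "common.postgresql.values.enabled" (dict "subchart" "true" "context" $)) "true" -}}
+  ... validation logic ...
+  {{- end -}})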
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_redis.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..dcccfc1ae --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_validations.tpl b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/charts/common/values.yaml b/deployment/helm/milvus/charts/etcd/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/deployment/helm/milvus/charts/etcd/templates/NOTES.txt b/deployment/helm/milvus/charts/etcd/templates/NOTES.txt new file mode 100644 index 000000000..bfa6e1626 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/NOTES.txt @@ -0,0 +1,119 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if and (eq .Values.service.type "LoadBalancer") .Values.auth.rbac.allowNoneAuthentication }} +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer", "auth.rbac.enabled=false" and + "auth.rbac.allowNoneAuthentication=true" you have most likely exposed the etcd + service externally without any authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "auth.rbac.enabled=true" + providing a valid password on "auth.rbac.rootPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/etcd/entrypoint.sh /opt/bitnami/scripts/etcd/run.sh + +{{- else }} + +etcd can be accessed via port {{ coalesce .Values.service.ports.client .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To create a pod that you can use as a etcd client run the following command: + + kubectl run {{ template "common.names.fullname" . }}-client --restart='Never' --image {{ template "etcd.image" . }}{{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} --env ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} -o jsonpath="{.data.etcd-root-password}" | base64 -d){{- end }} --env ETCDCTL_ENDPOINTS="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ coalesce .Values.service.ports.client .Values.service.port }}" --namespace {{ .Release.Namespace }} --command -- sleep infinity + +Then, you can set/get a key using the commands below: + + kubectl exec --namespace {{ .Release.Namespace }} -it {{ template "common.names.fullname" . }}-client -- bash + {{- $etcdAuthOptions := include "etcd.authOptions" . }} + etcdctl {{ $etcdAuthOptions }} put /message Hello + etcdctl {{ $etcdAuthOptions }} get /message + +To connect to your etcd server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + echo "etcd URL: http://$NODE_IP:$NODE_PORT/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "etcd URL: http://$SERVICE_IP:{{ coalesce .Values.service.ports.client .Values.service.port }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . 
}} {{ coalesce .Values.service.ports.client .Values.service.port }}:{{ coalesce .Values.service.ports.client .Values.service.port }} & + echo "etcd URL: http://127.0.0.1:{{ coalesce .Values.service.ports.client .Values.service.port }}" + +{{- end }} +{{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + + * As rbac is enabled you should add the flag `--user root:$ETCD_ROOT_PASSWORD` to the etcdctl commands. Use the command below to export the password: + + export ETCD_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} -o jsonpath="{.data.etcd-root-password}" | base64 -d) + +{{- end }} +{{- if .Values.auth.client.secureTransport }} +{{- if .Values.auth.client.useAutoTLS }} + + * As TLS is enabled you should add the flag `--cert-file /bitnami/etcd/data/fixtures/client/cert.pem --key-file /bitnami/etcd/data/fixtures/client/key.pem` to the etcdctl commands. + +{{- else }} + + * As TLS is enabled you should add the flag `--cert-file /opt/bitnami/etcd/certs/client/{{ .Values.auth.client.certFilename }} --key-file /opt/bitnami/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}` to the etcdctl commands. + +{{- end }} + + * You should also export a proper etcdctl endpoint using the https schema. Eg. + + export ETCDCTL_ENDPOINTS=https://{{ template "common.names.fullname" . }}-0:{{ coalesce .Values.service.ports.client .Values.service.port }} + +{{- end }} +{{- if .Values.auth.client.enableAuthentication }} + + * As TLS host authentication is enabled you should add the flag `--ca-file /opt/bitnami/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}` to the etcdctl commands. + +{{- end }} +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "etcd.validateValues" . }} +{{- $requiredPassword := list -}} +{{- $secretName := include "etcd.secretName" . 
-}} +{{- if and (or .Values.auth.rbac.create .Values.auth.rbac.enabled) (not .Values.auth.rbac.existingSecret) -}} + {{- $requiredEtcdPassword := dict "valueKey" "auth.rbac.rootPassword" "secret" $secretName "field" "etcd-root-password" -}} + {{- $requiredPassword = append $requiredPassword $requiredEtcdPassword -}} +{{- end -}} +{{- $requiredEtcdPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPassword "context" $) -}} +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredEtcdPasswordErrors) "context" $) -}} diff --git a/deployment/helm/milvus/charts/etcd/templates/_helpers.tpl b/deployment/helm/milvus/charts/etcd/templates/_helpers.tpl new file mode 100644 index 000000000..35b1a57e0 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/_helpers.tpl @@ -0,0 +1,205 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper etcd image name +*/}} +{{- define "etcd.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "etcd.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "etcd.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper etcd peer protocol +*/}} +{{- define "etcd.peerProtocol" -}} +{{- if .Values.auth.peer.secureTransport -}} +{{- print "https" -}} +{{- else -}} +{{- print "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper etcd client protocol +*/}} +{{- define "etcd.clientProtocol" -}} +{{- if .Values.auth.client.secureTransport -}} +{{- print "https" -}} +{{- else -}} +{{- print "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper etcdctl authentication options +*/}} +{{- define "etcd.authOptions" -}} +{{- $rbacOption := "--user root:$ROOT_PASSWORD" -}} +{{- $certsOption := " --cert $ETCD_CERT_FILE --key $ETCD_KEY_FILE" -}} +{{- $autoCertsOption := " --cert /bitnami/etcd/data/fixtures/client/cert.pem --key /bitnami/etcd/data/fixtures/client/key.pem" -}} +{{- $caOption := " --cacert $ETCD_TRUSTED_CA_FILE" -}} +{{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled -}} + {{- printf "%s" $rbacOption -}} +{{- end -}} +{{- if and .Values.auth.client.secureTransport .Values.auth.client.useAutoTLS -}} + {{- printf "%s" $autoCertsOption -}} +{{- else if and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS) -}} + {{- printf "%s" $certsOption -}} + {{- if .Values.auth.client.enableAuthentication -}} + {{- printf "%s" $caOption -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the etcd configuration configmap +*/}} +{{- define "etcd.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "etcd.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with etcd credentials +*/}} +{{- define "etcd.secretName" -}} + {{- if .Values.auth.rbac.existingSecret -}} + {{- printf "%s" .Values.auth.rbac.existingSecret | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the secret password key to be retrieved from etcd secret. +*/}} +{{- define "etcd.secretPasswordKey" -}} +{{- if and .Values.auth.rbac.existingSecret .Values.auth.rbac.existingSecretPasswordKey -}} +{{- printf "%s" .Values.auth.rbac.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "etcd-root-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for the etcd token private key +*/}} +{{- define "etcd.token.createSecret" -}} +{{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") (empty .Values.auth.token.privateKey.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with etcd token private key +*/}} +{{- define "etcd.token.secretName" -}} + {{- if .Values.auth.token.privateKey.existingSecret -}} + {{- printf "%s" .Values.auth.token.privateKey.existingSecret | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-jwt-token" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} + {{- end -}} +{{- end -}} + +{{/* +Return the proper Disaster Recovery PVC name +*/}} +{{- define "etcd.disasterRecovery.pvc.name" -}} +{{- if .Values.disasterRecovery.pvc.existingClaim -}} + {{- printf "%s" (tpl .Values.disasterRecovery.pvc.existingClaim $) | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.startFromSnapshot.existingClaim -}} + {{- printf "%s" (tpl .Values.startFromSnapshot.existingClaim $) | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- printf "%s-snapshotter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "etcd.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "common.names.fullname" .) .Values.serviceAccount.name | trunc 63 | trimSuffix "-" }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name | trunc 63 | trimSuffix "-" }} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "etcd.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "etcd.validateValues.startFromSnapshot.existingClaim" .) -}} +{{- $messages := append $messages (include "etcd.validateValues.startFromSnapshot.snapshotFilename" .) -}} +{{- $messages := append $messages (include "etcd.validateValues.disasterRecovery" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of etcd - an existing claim must be provided when startFromSnapshot is enabled */}} +{{- define "etcd.validateValues.startFromSnapshot.existingClaim" -}} +{{- if and .Values.startFromSnapshot.enabled (not .Values.startFromSnapshot.existingClaim) (not .Values.disasterRecovery.enabled) -}} +etcd: startFromSnapshot.existingClaim + An existing claim must be provided when startFromSnapshot is enabled and disasterRecovery is disabled!! + Please provide it (--set startFromSnapshot.existingClaim="xxxx") +{{- end -}} +{{- end -}} + +{{/* Validate values of etcd - the snapshot filename must be provided when startFromSnapshot is enabled */}} +{{- define "etcd.validateValues.startFromSnapshot.snapshotFilename" -}} +{{- if and .Values.startFromSnapshot.enabled (not .Values.startFromSnapshot.snapshotFilename) (not .Values.disasterRecovery.enabled) -}} +etcd: startFromSnapshot.snapshotFilename + The snapshot filename must be provided when startFromSnapshot is enabled and disasterRecovery is disabled!! + Please provide it (--set startFromSnapshot.snapshotFilename="xxxx") +{{- end -}} +{{- end -}} + +{{/* Validate values of etcd - persistence must be enabled when disasterRecovery is enabled */}} +{{- define "etcd.validateValues.disasterRecovery" -}} +{{- if and .Values.disasterRecovery.enabled (not .Values.persistence.enabled) -}} +etcd: disasterRecovery + Persistence must be enabled when disasterRecovery is enabled!! + Please enable persistence (--set persistence.enabled=true) +{{- end -}} +{{- end -}} + +{{- define "etcd.token.jwtToken" -}} +{{- if (include "etcd.token.createSecret" .) -}} +{{- $jwtToken := lookup "v1" "Secret" .Release.Namespace (printf "%s-jwt-token" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" ) -}} +{{- if $jwtToken -}} +{{ index $jwtToken "data" "jwt-token.pem" | b64dec }} +{{- else -}} +{{ genPrivateKey "rsa" }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/templates/configmap.yaml b/deployment/helm/milvus/charts/etcd/templates/configmap.yaml new file mode 100644 index 000000000..ca69d7f45 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "etcd.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + etcd.conf.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/cronjob.yaml b/deployment/helm/milvus/charts/etcd/templates/cronjob.yaml new file mode 100644 index 000000000..88faa3103 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/cronjob.yaml @@ -0,0 +1,137 @@ +{{- if .Values.disasterRecovery.enabled -}} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ printf "%s-snapshotter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + concurrencyPolicy: Forbid + schedule: {{ .Values.disasterRecovery.cronjob.schedule | quote }} + successfulJobsHistoryLimit: {{ .Values.disasterRecovery.cronjob.historyLimit }} + jobTemplate: + spec: + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 12 }} + app.kubernetes.io/component: snapshotter + {{- if .Values.disasterRecovery.cronjob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.disasterRecovery.cronjob.podAnnotations "context" $) | nindent 12 }} + {{- end }} + spec: + {{- if .Values.disasterRecovery.cronjob.nodeSelector }} + nodeSelector: {{- toYaml .Values.disasterRecovery.cronjob.nodeSelector | nindent 12 }} + {{- end }} + {{- if .Values.disasterRecovery.cronjob.tolerations }} + tolerations: {{- toYaml .Values.disasterRecovery.cronjob.tolerations | nindent 12 }} + {{- end }} + {{- include "etcd.imagePullSecrets" . | nindent 10 }} + restartPolicy: OnFailure + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled (or .Values.podSecurityContext.enabled .Values.containerSecurityContext.enabled) }} + initContainers: + - name: volume-permissions + image: {{ include "etcd.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /snapshots + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.resources "context" $) | nindent 16 }} + {{- end }} + volumeMounts: + - name: snapshot-volume + mountPath: /snapshots + {{- end }} + containers: + - name: etcd-snapshotter + image: {{ include "etcd.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 16 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 16 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 16 }} + {{- else }} + command: + - /opt/bitnami/scripts/etcd/snapshot.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ETCDCTL_API + value: "3" + - name: ETCD_ON_K8S + value: "yes" + - name: MY_STS_NAME + value: {{ include "common.names.fullname" . | quote }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $etcdFullname := include "common.names.fullname" . }} + {{- $etcdHeadlessServiceName := (printf "%s-%s" $etcdFullname "headless" | trunc 63 | trimSuffix "-") }} + {{- $clusterDomain := .Values.clusterDomain }} + - name: ETCD_CLUSTER_DOMAIN + value: {{ printf "%s.%s.svc.%s" $etcdHeadlessServiceName $releaseNamespace $clusterDomain | quote }} + - name: ETCD_SNAPSHOT_HISTORY_LIMIT + value: {{ .Values.disasterRecovery.cronjob.snapshotHistoryLimit | quote }} + - name: ETCD_SNAPSHOTS_DIR + value: {{ .Values.disasterRecovery.cronjob.snapshotsDir | quote }} + {{- if .Values.auth.client.secureTransport }} + - name: ETCD_CERT_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.certFilename }}" + - name: ETCD_KEY_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}" + {{- if .Values.auth.client.enableAuthentication }} + - name: ETCD_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.client.caFilename }} + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- end }} + {{- end }} + {{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + - name: ETCD_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + key: {{ include "etcd.secretPasswordKey" . }} + {{- end }} + {{- if .Values.disasterRecovery.cronjob.resources }} + resources: {{- toYaml .Values.disasterRecovery.cronjob.resources | nindent 16 }} + {{- end }} + volumeMounts: + - name: snapshot-volume + mountPath: /snapshots + {{- if .Values.disasterRecovery.pvc.subPath }} + subPath: {{ .Values.disasterRecovery.pvc.subPath }} + {{- end }} + {{- if .Values.auth.client.secureTransport }} + - name: certs + mountPath: /opt/bitnami/etcd/certs/client + readOnly: true + {{- end }} + volumes: + {{- if .Values.auth.client.secureTransport }} + - name: certs + secret: + secretName: {{ required "A secret containing the client certificates is required" (tpl .Values.auth.client.existingSecret .) }} + defaultMode: 256 + {{- end }} + - name: snapshot-volume + persistentVolumeClaim: + claimName: {{ include "etcd.disasterRecovery.pvc.name" . 
}} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/extra-list.yaml b/deployment/helm/milvus/charts/etcd/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/networkpolicy.yaml b/deployment/helm/milvus/charts/etcd/templates/networkpolicy.yaml new file mode 100644 index 000000000..a2e333bbe --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/networkpolicy.yaml @@ -0,0 +1,81 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 6 }} + {{- end }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.containerPorts.client }} + - port: {{ .Values.containerPorts.peer }} + to: + - podSelector: + matchLabels: {{- include "common.labels.standard" . | nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.containerPorts.client }} + - port: {{ .Values.containerPorts.peer }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.standard" . 
| nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 2379 + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/pdb.yaml b/deployment/helm/milvus/charts/etcd/templates/pdb.yaml new file mode 100644 index 000000000..f06492882 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/podmonitor.yaml b/deployment/helm/milvus/charts/etcd/templates/podmonitor.yaml new file mode 100644 index 000000000..952e569b4 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/podmonitor.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.metrics.enabled .Values.metrics.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ ternary .Values.metrics.podMonitor.namespace .Release.Namespace (not (empty .Values.metrics.podMonitor.namespace)) }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.podMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: client + path: /metrics + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.podMonitor.scheme }} + scheme: {{ .Values.metrics.podMonitor.scheme }} + {{- end }} + {{- if .Values.metrics.podMonitor.tlsConfig }} + tlsConfig: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podMonitor.tlsConfig "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.podMonitor.relabelings }} + relabelings: + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/prometheusrule.yaml b/deployment/helm/milvus/charts/etcd/templates/prometheusrule.yaml new file mode 100644 index 000000000..58a5594b1 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/prometheusrule.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 6 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/secrets.yaml b/deployment/helm/milvus/charts/etcd/templates/secrets.yaml new file mode 100644 index 000000000..ea46c28cd --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if and (or .Values.auth.rbac.create .Values.auth.rbac.enabled) (not .Values.auth.rbac.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.auth.rbac.rootPassword }} + etcd-root-password: {{ .Values.auth.rbac.rootPassword | b64enc | quote }} + {{- else }} + etcd-root-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/serviceaccount.yaml b/deployment/helm/milvus/charts/etcd/templates/serviceaccount.yaml new file mode 100644 index 000000000..a5721db40 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/serviceaccount.yaml @@ -0,0 +1,24 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ include "etcd.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/snapshot-pvc.yaml b/deployment/helm/milvus/charts/etcd/templates/snapshot-pvc.yaml new file mode 100644 index 000000000..2415b891d --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/snapshot-pvc.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.disasterRecovery.enabled (not .Values.disasterRecovery.pvc.existingClaim) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ printf "%s-snapshotter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.disasterRecovery.pvc.size | quote }} + storageClassName: {{ .Values.disasterRecovery.pvc.storageClassName | quote }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/etcd/templates/statefulset.yaml b/deployment/helm/milvus/charts/etcd/templates/statefulset.yaml new file mode 100644 index 000000000..f276eb095 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/statefulset.yaml @@ -0,0 +1,427 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "etcd.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "etcd.token.createSecret" .) }} + checksum/token-secret: {{ include (print $.Template.BasePath "/token-secrets.yaml") . | sha256sum }} + {{- end }} + spec: + {{- include "etcd.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.runtimeClassName }} + runtimeClassName: {{ .Values.runtimeClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.shareProcessNamespace }} + {{- end }} + serviceAccountName: {{ include "etcd.serviceAccountName" $ | quote }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "etcd.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/etcd + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.volumePermissions.resources "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/etcd + {{- end }} + {{- end }} + containers: + {{- $replicaCount := int .Values.replicaCount }} + {{- $peerPort := int .Values.containerPorts.peer }} + {{- $etcdFullname := include "common.names.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $etcdHeadlessServiceName := (printf "%s-%s" $etcdFullname "headless" | trunc 63 | trimSuffix "-") }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $etcdPeerProtocol := include "etcd.peerProtocol" . 
}} + {{- $etcdClientProtocol := include "etcd.clientProtocol" . }} + - name: etcd + image: {{ include "etcd.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_STS_NAME + value: {{ include "common.names.fullname" . | quote }} + - name: ETCDCTL_API + value: "3" + - name: ETCD_ON_K8S + value: "yes" + - name: ETCD_START_FROM_SNAPSHOT + value: {{ ternary "yes" "no" .Values.startFromSnapshot.enabled | quote }} + - name: ETCD_DISASTER_RECOVERY + value: {{ ternary "yes" "no" .Values.disasterRecovery.enabled | quote }} + - name: ETCD_NAME + value: "$(MY_POD_NAME)" + - name: ETCD_DATA_DIR + value: "/bitnami/etcd/data" + - name: ETCD_LOG_LEVEL + value: {{ ternary "debug" .Values.logLevel .Values.image.debug | quote }} + - name: ALLOW_NONE_AUTHENTICATION + value: {{ ternary "yes" "no" (and (not (or .Values.auth.rbac.create .Values.auth.rbac.enabled)) .Values.auth.rbac.allowNoneAuthentication) | quote }} + {{- if or .Values.auth.rbac.create .Values.auth.rbac.enabled }} + - name: ETCD_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "etcd.secretName" . }} + key: {{ include "etcd.secretPasswordKey" . 
}} + {{- end }} + {{- if .Values.auth.token.enabled }} + - name: ETCD_AUTH_TOKEN + {{- if eq .Values.auth.token.type "jwt" }} + value: {{ printf "jwt,priv-key=/opt/bitnami/etcd/certs/token/%s,sign-method=%s,ttl=%s" .Values.auth.token.privateKey.filename .Values.auth.token.signMethod .Values.auth.token.ttl | quote }} + {{- else if eq .Values.auth.token.type "simple" }} + value: "simple" + {{- end }} + {{- end }} + - name: ETCD_ADVERTISE_CLIENT_URLS + value: "{{ $etcdClientProtocol }}://$(MY_POD_NAME).{{ $etcdHeadlessServiceName }}.{{ .Release.Namespace }}.svc.{{ $clusterDomain }}:{{ .Values.containerPorts.client }},{{ $etcdClientProtocol }}://{{ $etcdFullname }}.{{ .Release.Namespace }}.svc.{{ $clusterDomain }}:{{ coalesce .Values.service.ports.client .Values.service.port }}" + - name: ETCD_LISTEN_CLIENT_URLS + value: "{{ $etcdClientProtocol }}://0.0.0.0:{{ .Values.containerPorts.client }}" + - name: ETCD_INITIAL_ADVERTISE_PEER_URLS + value: "{{ $etcdPeerProtocol }}://$(MY_POD_NAME).{{ $etcdHeadlessServiceName }}.{{ .Release.Namespace }}.svc.{{ $clusterDomain }}:{{ .Values.containerPorts.peer }}" + - name: ETCD_LISTEN_PEER_URLS + value: "{{ $etcdPeerProtocol }}://0.0.0.0:{{ .Values.containerPorts.peer }}" + {{- if .Values.autoCompactionMode }} + - name: ETCD_AUTO_COMPACTION_MODE + value: {{ .Values.autoCompactionMode | quote }} + {{- end }} + {{- if .Values.autoCompactionRetention }} + - name: ETCD_AUTO_COMPACTION_RETENTION + value: {{ .Values.autoCompactionRetention | quote }} + {{- end }} + {{- if .Values.maxProcs }} + - name: GOMAXPROCS + value: {{ .Values.maxProcs }} + {{- end }} + {{- if gt $replicaCount 1 }} + - name: ETCD_INITIAL_CLUSTER_TOKEN + value: "etcd-cluster-k8s" + - name: ETCD_INITIAL_CLUSTER_STATE + value: {{ default (ternary "new" "existing" .Release.IsInstall) .Values.initialClusterState | quote }} + {{- $initialCluster := list }} + {{- range $e, $i := until $replicaCount }} + {{- $initialCluster = append $initialCluster (printf "%s-%d=%s://%s-%d.%s.%s.svc.%s:%d" $etcdFullname $i $etcdPeerProtocol $etcdFullname $i $etcdHeadlessServiceName $releaseNamespace $clusterDomain $peerPort) }} + {{- end }} + - name: ETCD_INITIAL_CLUSTER + value: {{ join "," $initialCluster | quote }} + {{- end }} + - name: ETCD_CLUSTER_DOMAIN + value: {{ printf "%s.%s.svc.%s" $etcdHeadlessServiceName $releaseNamespace $clusterDomain | quote }} + {{- if and .Values.auth.client.secureTransport .Values.auth.client.useAutoTLS }} + - name: ETCD_AUTO_TLS + value: "true" + {{- else if .Values.auth.client.secureTransport }} + - name: ETCD_CERT_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.certFilename }}" + - name: ETCD_KEY_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.certKeyFilename }}" + {{- if .Values.auth.client.enableAuthentication }} + - name: ETCD_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.client.caFilename }} + - name: ETCD_TRUSTED_CA_FILE + value: "/opt/bitnami/etcd/certs/client/{{ .Values.auth.client.caFilename | default "ca.crt" }}" + {{- end }} + {{- end }} + {{- if and .Values.auth.peer.secureTransport .Values.auth.peer.useAutoTLS }} + - name: ETCD_PEER_AUTO_TLS + value: "true" + {{- else if .Values.auth.peer.secureTransport }} + - name: ETCD_PEER_CERT_FILE + value: "/opt/bitnami/etcd/certs/peer/{{ .Values.auth.peer.certFilename }}" + - name: ETCD_PEER_KEY_FILE + value: "/opt/bitnami/etcd/certs/peer/{{ 
.Values.auth.peer.certKeyFilename }}" + {{- if .Values.auth.peer.enableAuthentication }} + - name: ETCD_PEER_CLIENT_CERT_AUTH + value: "true" + - name: ETCD_PEER_TRUSTED_CA_FILE + value: "/opt/bitnami/etcd/certs/peer/{{ .Values.auth.peer.caFilename | default "ca.crt" }}" + {{- else if .Values.auth.peer.caFilename }} + - name: ETCD_PEER_TRUSTED_CA_FILE + value: "/opt/bitnami/etcd/certs/peer/{{ .Values.auth.peer.caFilename | default "ca.crt" }}" + {{- end }} + {{- end }} + {{- if .Values.startFromSnapshot.enabled }} + - name: ETCD_INIT_SNAPSHOT_FILENAME + value: {{ .Values.startFromSnapshot.snapshotFilename | quote }} + - name: ETCD_INIT_SNAPSHOTS_DIR + value: {{ ternary "/snapshots" "/init-snapshot" (and .Values.disasterRecovery.enabled (not .Values.disasterRecovery.pvc.existingClaim)) | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: client + containerPort: {{ .Values.containerPorts.client }} + protocol: TCP + - name: peer + containerPort: {{ .Values.containerPorts.peer }} + protocol: TCP + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /opt/bitnami/scripts/etcd/healthcheck.sh + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /opt/bitnami/scripts/etcd/healthcheck.sh + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: + exec: + command: + - /opt/bitnami/scripts/etcd/healthcheck.sh + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" 
$) | nindent 12 }} + {{- else if and (gt $replicaCount 1) .Values.removeMemberOnContainerTermination }} + lifecycle: + preStop: + exec: + command: + - /opt/bitnami/scripts/etcd/prestop.sh + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- include "common.tplvalues.render" (dict "value" .Values.resources "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/etcd + {{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") }} + - name: etcd-jwt-token + mountPath: /opt/bitnami/etcd/certs/token/ + readOnly: true + {{- end }} + {{- if or (and .Values.startFromSnapshot.enabled (not .Values.disasterRecovery.enabled)) (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled .Values.disasterRecovery.pvc.existingClaim) }} + - name: init-snapshot-volume + mountPath: /init-snapshot + {{- end }} + {{- if or .Values.disasterRecovery.enabled (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled) }} + - name: snapshot-volume + mountPath: /snapshots + {{- if .Values.disasterRecovery.pvc.subPath }} + subPath: {{ .Values.disasterRecovery.pvc.subPath }} + {{- end }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: etcd-config + mountPath: /opt/bitnami/etcd/conf/ + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + mountPath: /opt/bitnami/etcd/certs/client/ + readOnly: true + {{- end }} + {{- if or .Values.auth.peer.enableAuthentication (and .Values.auth.peer.secureTransport (not .Values.auth.peer.useAutoTLS )) }} + - name: etcd-peer-certs + mountPath: /opt/bitnami/etcd/certs/peer/ + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if and (eq .Values.auth.token.enabled true) (eq .Values.auth.token.type "jwt") }} + - name: etcd-jwt-token + secret: + secretName: {{ include "etcd.token.secretName" . }} + defaultMode: 256 + {{- end }} + {{- if or (and .Values.startFromSnapshot.enabled (not .Values.disasterRecovery.enabled)) (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled .Values.disasterRecovery.pvc.existingClaim) }} + - name: init-snapshot-volume + persistentVolumeClaim: + claimName: {{ .Values.startFromSnapshot.existingClaim }} + {{- end }} + {{- if or .Values.disasterRecovery.enabled (and .Values.disasterRecovery.enabled .Values.startFromSnapshot.enabled) }} + - name: snapshot-volume + persistentVolumeClaim: + claimName: {{ include "etcd.disasterRecovery.pvc.name" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: etcd-config + configMap: + name: {{ include "etcd.configmapName" . }} + {{- end }} + {{- if or .Values.auth.client.enableAuthentication (and .Values.auth.client.secureTransport (not .Values.auth.client.useAutoTLS )) }} + - name: etcd-client-certs + secret: + secretName: {{ required "A secret containing the client certificates is required" (tpl .Values.auth.client.existingSecret .) 
}} + defaultMode: 256 + {{- end }} + {{- if or .Values.auth.peer.enableAuthentication (and .Values.auth.peer.secureTransport (not .Values.auth.peer.useAutoTLS )) }} + - name: etcd-peer-certs + secret: + secretName: {{ required "A secret containing the peer certificates is required" (tpl .Values.auth.peer.existingSecret .) }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/svc-headless.yaml b/deployment/helm/milvus/charts/etcd/templates/svc-headless.yaml new file mode 100644 index 000000000..0d4df3e0e --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/svc-headless.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.headless.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + {{- if .Values.service.clientPortNameOverride }} + {{- if .Values.auth.client.secureTransport }} + - name: {{ .Values.service.clientPortNameOverride }}-ssl + {{- else }} + - name: {{ .Values.service.clientPortNameOverride }} + {{- end }} + {{- else }} + - name: client + {{- end }} + port: {{ .Values.containerPorts.client }} + targetPort: client + {{- if .Values.service.peerPortNameOverride }} + {{- if .Values.auth.peer.secureTransport }} + - name: {{ .Values.service.peerPortNameOverride }}-ssl + {{- else }} + - name: {{ .Values.service.peerPortNameOverride }} + {{- end }} + {{- else }} + - name: peer + {{- end }} + port: {{ .Values.containerPorts.peer }} + targetPort: peer + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/deployment/helm/milvus/charts/etcd/templates/svc.yaml b/deployment/helm/milvus/charts/etcd/templates/svc.yaml new file mode 100644 index 000000000..abefe1415 --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/svc.yaml @@ -0,0 +1,62 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: {{ default "client" .Values.service.clientPortNameOverride | quote }} + port: {{ coalesce .Values.service.ports.client .Values.service.port }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty (coalesce .Values.service.nodePorts.client .Values.service.nodePorts.clientPort))) }} + nodePort: {{ coalesce .Values.service.nodePorts.client .Values.service.nodePorts.clientPort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: {{ default "peer" .Values.service.peerPortNameOverride | quote }} + port: {{ coalesce .Values.service.ports.peer .Values.service.peerPort }} + targetPort: peer + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty (coalesce .Values.service.nodePorts.peer .Values.service.nodePorts.peerPort))) }} + nodePort: {{ coalesce .Values.service.nodePorts.peer .Values.service.nodePorts.peerPort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/templates/token-secrets.yaml b/deployment/helm/milvus/charts/etcd/templates/token-secrets.yaml new file mode 100644 index 000000000..c0246fbaf --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/templates/token-secrets.yaml @@ -0,0 +1,14 @@ +{{- if (include "etcd.token.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-jwt-token" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + jwt-token.pem: {{ include "etcd.token.jwtToken" . | b64enc | quote }} +{{- end }} diff --git a/deployment/helm/milvus/charts/etcd/values.yaml b/deployment/helm/milvus/charts/etcd/values.yaml new file mode 100644 index 000000000..d657ac1bf --- /dev/null +++ b/deployment/helm/milvus/charts/etcd/values.yaml @@ -0,0 +1,906 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets [array] Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param commonLabels [object] Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations [object] Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy [array] Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section etcd parameters +## + +## Bitnami etcd image version +## ref: https://hub.docker.com/r/bitnami/etcd/tags/ +## @param image.registry etcd image registry +## @param image.repository etcd image name +## @param image.tag etcd image tag +## @param image.digest etcd image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## +image: + registry: docker.io + repository: bitnami/etcd + tag: 3.5.9-debian-11-r4 + digest: "" + ## @param image.pullPolicy etcd image pull policy + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## @param image.pullSecrets [array] etcd image pull secrets + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param image.debug Enable image debug mode + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## +auth: + ## Role-based access control parameters + ## ref: https://etcd.io/docs/current/op-guide/authentication/ + ## + rbac: + ## @param auth.rbac.create Switch to enable RBAC authentication + ## + create: true + ## @param auth.rbac.allowNoneAuthentication Allow to use etcd without configuring RBAC authentication + ## + allowNoneAuthentication: true + ## @param auth.rbac.rootPassword Root user password. The root user is always `root` + ## + rootPassword: "" + ## @param auth.rbac.existingSecret Name of the existing secret containing credentials for the root user + ## + existingSecret: "" + ## @param auth.rbac.existingSecretPasswordKey Name of key containing password to be retrieved from the existing secret + ## + existingSecretPasswordKey: "" + ## Authentication token + ## ref: https://etcd.io/docs/latest/learning/design-auth-v3/#two-types-of-tokens-simple-and-jwt + ## + token: + ## @param auth.token.enabled Enables token authentication + ## + enabled: true + ## @param auth.token.type Authentication token type. Allowed values: 'simple' or 'jwt' + ## ref: https://etcd.io/docs/latest/op-guide/configuration/#--auth-token + ## + type: jwt + ## @param auth.token.privateKey.filename Name of the file containing the private key for signing the JWT token + ## @param auth.token.privateKey.existingSecret Name of the existing secret containing the private key for signing the JWT token + ## NOTE: Ignored if auth.token.type=simple + ## NOTE: A secret containing a private key will be auto-generated if an existing one is not provided. + ## + privateKey: + filename: jwt-token.pem + existingSecret: "" + ## @param auth.token.signMethod JWT token sign method + ## NOTE: Ignored if auth.token.type=simple + ## + signMethod: RS256 + ## @param auth.token.ttl JWT token TTL + ## NOTE: Ignored if auth.token.type=simple + ## + ttl: 10m + ## TLS authentication for client-to-server communications + ## ref: https://etcd.io/docs/current/op-guide/security/ + ## + client: + ## @param auth.client.secureTransport Switch to encrypt client-to-server communications using TLS certificates + ## + secureTransport: false + ## @param auth.client.useAutoTLS Switch to automatically create the TLS certificates + ## + useAutoTLS: false + ## @param auth.client.existingSecret Name of the existing secret containing the TLS certificates for client-to-server communications + ## + existingSecret: "" + ## @param auth.client.enableAuthentication Switch to enable host authentication using TLS certificates. 
Requires existing secret + ## + enableAuthentication: false + ## @param auth.client.certFilename Name of the file containing the client certificate + ## + certFilename: cert.pem + ## @param auth.client.certKeyFilename Name of the file containing the client certificate private key + ## + certKeyFilename: key.pem + ## @param auth.client.caFilename Name of the file containing the client CA certificate + ## If not specified and `auth.client.enableAuthentication=true` or `auth.rbac.enabled=true`, the default is `ca.crt` + ## + caFilename: "" + ## TLS authentication for server-to-server communications + ## ref: https://etcd.io/docs/current/op-guide/security/ + ## + peer: + ## @param auth.peer.secureTransport Switch to encrypt server-to-server communications using TLS certificates + ## + secureTransport: false + ## @param auth.peer.useAutoTLS Switch to automatically create the TLS certificates + ## + useAutoTLS: false + ## @param auth.peer.existingSecret Name of the existing secret containing the TLS certificates for server-to-server communications + ## + existingSecret: "" + ## @param auth.peer.enableAuthentication Switch to enable host authentication using TLS certificates. Requires existing secret + ## + enableAuthentication: false + ## @param auth.peer.certFilename Name of the file containing the peer certificate + ## + certFilename: cert.pem + ## @param auth.peer.certKeyFilename Name of the file containing the peer certificate private key + ## + certKeyFilename: key.pem + ## @param auth.peer.caFilename Name of the file containing the peer CA certificate + ## If not specified and `auth.peer.enableAuthentication=true` or `rbac.enabled=true`, the default is `ca.crt` + ## + caFilename: "" +## @param autoCompactionMode Auto compaction mode, by default periodic. Valid values: "periodic", "revision". +## - 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. 5m). +## - 'revision' for revision number based retention. +## +autoCompactionMode: "" +## @param autoCompactionRetention Auto compaction retention for mvcc key value store in hours; by default 0, meaning disabled +## +autoCompactionRetention: "" +## @param initialClusterState Initial cluster state. Allowed values: 'new' or 'existing' +## If this value is not set, the default values below are set: +## - 'new': when installing the chart ('helm install ...') +## - 'existing': when upgrading the chart ('helm upgrade ...') +## +initialClusterState: "" +## @param logLevel Sets the log level for the etcd process. Allowed values: 'debug', 'info', 'warn', 'error', 'panic', 'fatal' +## +logLevel: "info" +## @param maxProcs Limits the number of operating system threads that can execute user-level +## Go code simultaneously by setting GOMAXPROCS environment variable +## ref: https://golang.org/pkg/runtime +## +maxProcs: "" +## @param removeMemberOnContainerTermination Use a PreStop hook to remove the etcd members from the etcd cluster when +## the containers are terminated. Set to 'false' if an error indicating the member ID wasn't properly stored appears. +## NOTE: Ignored if lifecycleHooks is set or replicaCount=1 +## +removeMemberOnContainerTermination: true +## @param configuration etcd configuration. 
Specify content for etcd.conf.yml +## e.g: +## configuration: |- +## foo: bar +## baz: +## +configuration: "" +## @param existingConfigmap Existing ConfigMap with etcd configuration +## NOTE: When it's set the configuration parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars [array] Extra environment variables to be set on etcd container +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars +## +extraEnvVarsSecret: "" +## @param command [array] Default container command (useful when using custom images) +## +command: [] +## @param args [array] Default container args (useful when using custom images) +## +args: [] + +## @section etcd statefulset parameters +## + + +## @param replicaCount Number of etcd replicas to deploy +## +replicaCount: 1 +## Update strategy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## @param updateStrategy.type Update strategy type, can be set to RollingUpdate or OnDelete. +## +updateStrategy: + type: RollingUpdate +## @param podManagementPolicy Pod management policy for the etcd statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +## +podManagementPolicy: Parallel +## @param hostAliases [array] etcd pod host aliases +## ref: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param lifecycleHooks [object] Override default etcd container hooks +## +lifecycleHooks: {} +## etcd container ports to open +## @param containerPorts.client Client port to expose at container level +## @param containerPorts.peer Peer port to expose at container level +## +containerPorts: + client: 2379 + peer: 2380 +## etcd pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled etcd pods' Security Context +## @param podSecurityContext.fsGroup Set etcd pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## etcd containers' SecurityContext +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enabled etcd containers' Security Context +## @param containerSecurityContext.runAsUser Set etcd container's Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set etcd container's Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as nonprivilege +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false +## etcd containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+## @param resources.limits [object] The resources limits for the etcd container +## @param resources.requests [object] The requested resources for the etcd container +## +resources: + ## Example: + ## limits: + ## cpu: 500m + ## memory: 1Gi + ## + limits: {} + requests: {} +## Configure extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 60 +## @param customLivenessProbe [object] Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe [object] Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe [object] Override default startup probe +## +customStartupProbe: {} +## @param extraVolumes [array] Optionally specify extra list of additional volumes for etcd pods +## +extraVolumes: [] +## @param extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for etcd container(s) +## +extraVolumeMounts: [] +## @param initContainers [array] Add additional init containers to the etcd pods +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars [array] Add additional sidecar containers to the etcd pods +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## 
imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param podAnnotations [object] Annotations for etcd pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podLabels [object] Extra labels for etcd pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. +## @param nodeAffinityPreset.values [array] Node label values to match. Ignored if `affinity` is set. +## +nodeAffinityPreset: + type: "" + ## e.g: + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## e.g: + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity [object] Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector [object] Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations [array] Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate +## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution +## +terminationGracePeriodSeconds: "" +## @param schedulerName Name of the k8s scheduler (other than default) +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param priorityClassName Name of the priority class to be used by etcd pods +## Priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param runtimeClassName Name of the runtime class to be used by pod(s) +## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ +## +runtimeClassName: "" +## @param shareProcessNamespace Enable shared process namespace in a pod. +## If set to false (default), each container will run in separate namespace, etcd will have PID=1. +## If set to true, the /pause will run as init process and will reap any zombie PIDs, +## for example, generated by a custom exec probe running longer than a probe timeoutSeconds. +## Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ +## +shareProcessNamespace: false +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment +## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## The value is evaluated as a template +## +topologySpreadConstraints: [] +## persistentVolumeClaimRetentionPolicy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## @param persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet +## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced +## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted +persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain +## @section Traffic exposure parameters +## + +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.enabled Create the etcd client Service (in addition to the headless Service) if set to true + ## + enabled: true + ## @param service.clusterIP Kubernetes service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.ports.client etcd client port + ## @param service.ports.peer etcd peer port + ## + ports: + client: 2379 + peer: 2380 + ## @param service.nodePorts.client Specify the nodePort client value for the LoadBalancer and NodePort service types. + ## @param service.nodePorts.peer Specify the nodePort peer value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + peer: "" + ## @param service.clientPortNameOverride etcd client port name override + ## + clientPortNameOverride: "" + ## @param service.peerPortNameOverride etcd peer port name override + ## + peerPortNameOverride: "" + ## @param service.loadBalancerIP loadBalancerIP for the etcd service (optional, cloud specific) + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges [array] Load Balancer source ranges + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalIPs [array] External IPs + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param service.externalTrafficPolicy etcd service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.annotations [object] Additional annotations for the etcd service + ## + annotations: {} + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + 
sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + +## @section Persistence parameters +## + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled If true, use a Persistent Volume Claim. If false, use emptyDir. + ## + enabled: true + ## @param persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## + ## @param persistence.annotations [object] Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels [object] Labels for the PVC + ## + labels: {} + ## @param persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for etcd data volume + ## + size: 8Gi + ## @param persistence.selector [object] Selector to match an existing Persistent Volume + ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + selector: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image name + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r118 + digest: "" + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## + pullPolicy: IfNotPresent + ## @param volumePermissions.image.pullSecrets [array] Specify docker-registry secret names as an array + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container' resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits [object] Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests [object] Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 500m + ## memory: 1Gi + ## + limits: {} + requests: {} + +## @section Network Policy parameters +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## etcd is listening on. When true, etcd will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Metrics parameters +## + +metrics: + ## @param metrics.enabled Expose etcd metrics + ## + enabled: false + ## @param metrics.podAnnotations [object] Annotations for the Prometheus metrics on etcd pods + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.containerPorts.client }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + podMonitor: + ## @param metrics.podMonitor.enabled Create PodMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.podMonitor.namespace Namespace in which Prometheus is running + ## + namespace: monitoring + ## @param metrics.podMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.podMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## + scrapeTimeout: 30s + ## @param metrics.podMonitor.additionalLabels [object] Additional labels that can be used so PodMonitors will be discovered by Prometheus + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} + ## @param metrics.podMonitor.scheme Scheme to use for scraping + ## + scheme: http + ## @param metrics.podMonitor.tlsConfig [object] TLS configuration used for scrape endpoints used by Prometheus + ## ref: 
https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + ## e.g: + ## tlsConfig: + ## ca: + ## secret: + ## name: existingSecretName + ## + tlsConfig: {} + ## @param metrics.podMonitor.relabelings [array] Prometheus relabeling rules + ## + relabelings: [] + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Prometheus Rule definitions + # - alert: ETCD has no leader + # annotations: + # summary: "ETCD has no leader" + # description: "pod {{`{{`}} $labels.pod {{`}}`}} state error, can't connect leader" + # for: 1m + # expr: etcd_server_has_leader == 0 + # labels: + # severity: critical + # group: PaaS + ## + rules: [] + + +## @section Snapshotting parameters +## + +## Start a new etcd cluster recovering the data from an existing snapshot before bootstrapping +## +startFromSnapshot: + ## @param startFromSnapshot.enabled Initialize new cluster recovering an existing snapshot + ## + enabled: false + ## @param startFromSnapshot.existingClaim Existing PVC containing the etcd snapshot + ## + existingClaim: "" + ## @param startFromSnapshot.snapshotFilename Snapshot filename + ## + snapshotFilename: "" +## Enable auto disaster recovery by periodically snapshotting the keyspace: +## - It creates a cronjob to periodically snapshotting the keyspace +## - It also creates a ReadWriteMany PVC to store the snapshots +## If the cluster permanently loses more than (N-1)/2 members, it tries to +## recover itself from the last available snapshot. +## +disasterRecovery: + ## @param disasterRecovery.enabled Enable auto disaster recovery by periodically snapshotting the keyspace + ## + enabled: false + cronjob: + ## @param disasterRecovery.cronjob.schedule Schedule in Cron format to save snapshots + ## See https://en.wikipedia.org/wiki/Cron + ## + schedule: "*/30 * * * *" + ## @param disasterRecovery.cronjob.historyLimit Number of successful finished jobs to retain + ## + historyLimit: 1 + ## @param disasterRecovery.cronjob.snapshotHistoryLimit Number of etcd snapshots to retain, tagged by date + ## + snapshotHistoryLimit: 1 + ## @param disasterRecovery.cronjob.snapshotsDir Directory to store snapshots + ## + snapshotsDir: "/snapshots" + ## @param disasterRecovery.cronjob.podAnnotations [object] Pod annotations for cronjob pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## Configure resource requests and limits for snapshotter containers + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param disasterRecovery.cronjob.resources.limits [object] Cronjob container resource limits + ## @param disasterRecovery.cronjob.resources.requests [object] Cronjob container resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 500m + ## memory: 1Gi + ## + limits: {} + requests: {} + + ## @param disasterRecovery.cronjob.nodeSelector Node labels for cronjob pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param disasterRecovery.cronjob.tolerations Tolerations for cronjob pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + pvc: + ## @param disasterRecovery.pvc.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param disasterRecovery.pvc.size PVC Storage Request + ## + size: 2Gi + ## @param disasterRecovery.pvc.storageClassName Storage Class for snapshots volume + ## + storageClassName: nfs + ## @param disasterRecovery.pvc.subPath Path within the volume from which to mount + ## Useful if snapshots should only be stored in a subdirectory of the volume + ## + subPath: "" + +## @section Service account parameters +## + +serviceAccount: + ## @param serviceAccount.create Enable/disable service account creation + ## + create: false + ## @param serviceAccount.name Name of the service account to create or use + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Enable/disable auto mounting of service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations [object] Additional annotations to be included on the service account + ## + annotations: {} + ## @param serviceAccount.labels [object] Additional labels to be included on the service account + ## + labels: {} + +## @section Other parameters +## + +## etcd Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: true + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 51% + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" diff --git a/deployment/helm/milvus/charts/kafka/.helmignore b/deployment/helm/milvus/charts/kafka/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/helm/milvus/charts/kafka/Chart.lock b/deployment/helm/milvus/charts/kafka/Chart.lock new file mode 100644 index 000000000..6d70daf8d --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 8.1.2 +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.12.0 +digest: sha256:903762070537232f45dacf1d3ab09c43a9fec56656c2c66c15fbb35b64b6678c +generated: "2022-03-17T20:28:29.202310166Z" diff --git a/deployment/helm/milvus/charts/kafka/Chart.yaml b/deployment/helm/milvus/charts/kafka/Chart.yaml new file mode 100644 index 000000000..02f500f2b --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/Chart.yaml @@ -0,0 +1,33 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.1.0 +dependencies: +- condition: zookeeper.enabled + name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 8.x.x +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Apache Kafka is a distributed streaming platform designed to build real-time + pipelines and can be used as a message broker or as a replacement for a log aggregation + solution for big data applications. +home: https://github.com/bitnami/charts/tree/master/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: kafka +sources: +- https://github.com/bitnami/bitnami-docker-kafka +- https://kafka.apache.org/ +version: 15.5.1 diff --git a/deployment/helm/milvus/charts/kafka/README.md b/deployment/helm/milvus/charts/kafka/README.md new file mode 100644 index 000000000..acdb5a8ab --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/README.md @@ -0,0 +1,965 @@ + + +# Apache Kafka packaged by Bitnami + +Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. + +[Overview of Apache Kafka](http://kafka.apache.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. 
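+
+As a quick orientation, the sketch below is a minimal custom `values.yaml` one might pass to `helm install -f`. It only uses parameters documented in the [Parameters](#parameters) section of this README (`replicaCount`, `persistence.size`, `zookeeper.enabled`, `externalZookeeper.servers`); the hostname is a placeholder and the values are examples rather than recommendations.
+
+```yaml
+# Example overrides only; every key below is described in the Parameters tables.
+replicaCount: 3                  # run three Kafka brokers instead of the default 1
+persistence:
+  size: 20Gi                     # PVC size for the Kafka data volume
+zookeeper:
+  enabled: false                 # skip the bundled ZooKeeper subchart...
+externalZookeeper:
+  servers:
+    - my-zookeeper.example.svc   # ...and point the brokers at an existing ZooKeeper (placeholder host)
+```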
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### Kafka parameters + +| Name | Description | Value | +| ------------------------------------------ |-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------| +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image repository | `bitnami/kafka` | +| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.1.0-debian-10-r31` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `config` | Configuration file for Kafka. 
Auto-generated based on other parameters when not specified | `""` | +| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` | +| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `true` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `_10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `_1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to ZooKeeper | `6000` | +| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | +| `authorizerClassName` | The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties | `""` | +| `allowEveryoneIfNoAclFound` | By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users | `true` | +| `superUsers` | You can add super users in server.properties | `User:admin` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.externalClientProtocol` | Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `""` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.sasl.mechanisms` | SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `plain` | +| `auth.sasl.jaas.clientUsers` | Kafka client user list | `["user"]` | +| `auth.sasl.jaas.clientPasswords` | Kafka client passwords. This is mandatory if more than one user is specified in clientUsers | `[]` | +| `auth.sasl.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.sasl.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperUser` | Kafka ZooKeeper user for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperPassword` | Kafka ZooKeeper password for SASL authentication | `""` | +| `auth.sasl.jaas.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser | `""` | +| `auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem` | `jks` | +| `auth.tls.pemChainIncluded` | Flag to denote whether the TLS authority chain is included inside the PEM | `false` | +| `auth.tls.existingSecrets` | Array existing secrets containing the TLS certificates for the Kafka brokers | `[]` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` | `false` | +| `auth.tls.password` | Password to access the JKS files or PEM key when they are password-protected. | `""` | +| `auth.tls.existingSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) | `""` | +| `auth.tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` | `""` | +| `auth.tls.jksKeystoreSAN` | The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate | `""` | +| `auth.tls.jksTruststore` | The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore | `""` | +| `auth.tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. 
Auto-calculated it's set to nil | `""` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | +| `command` | Override Kafka container command | `["/scripts/setup.sh"]` | +| `args` | Override Kafka container arguments | `[]` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of Kafka nodes | `1` | +| `minBrokerId` | Minimal broker.id value, nodes increment their `broker.id` respectively | `0` | +| `containerPorts.client` | Kafka client container port | `9092` | +| `containerPorts.internal` | Kafka inter-broker container port | `9093` | +| `containerPorts.external` | Kafka external container port | `9094` | +| `livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the container | `{}` | +| `resources.requests` | The requested resources for the container | `{}` | +| `podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| 
`containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `hostAliases` | Kafka pods host aliases | `[]` | +| `hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `podLabels` | Extra labels for Kafka pods | `{}` | +| `podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | Kafka statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `pdb.minAvailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | +| `pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------- | --------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.internal` | Kafka svc port for inter-broker connections | `9093` | +| `service.ports.external` | Kafka svc port for external connections | `9094` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to 
the same pod or round-robin | `None` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.23.4-debian-10-r9` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `""` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `logPersistence.annotations` | Annotations for the PVC | `{}` | +| `logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r351` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.4.2-debian-10-r160` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.resources.limits` | The resources limits for the container | `{}` | +| `metrics.kafka.resources.requests` | The requested resources for the container | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. | `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.16.1-debian-10-r222` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resources.limits` | The resources limits for the JMX exporter container | `{}` | +| `metrics.jmx.resources.requests` | The requested resources for the JMX exporter container | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | + + +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable kafka provisioning Job | `false` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. | `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
| `""` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka provisioning topics | `[]` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.resources.limits` | The resources limits for the Kafka provisioning container | `{}` | +| `provisioning.resources.requests` | The requested resources for the Kafka provisioning container | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart | `true` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use | `[]` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + bitnami/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml bitnami/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. + +Using `extraEnvVars` with `KAFKA_CFG_` is the preferred and simplest way to add custom Kafka parameters not otherwise specified in this chart. Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`. +Setting either `config` or `existingConfigmap` will cause the chart to disregard `KAFKA_CFG_` settings, which are used by many other Kafka-related chart values described above, as well as dynamically generated parameters such as `zookeeper.connect`. This can cause unexpected behavior. + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 3 listeners: + +- One for inter-broker communications. +- A second one for communications with clients within the K8s cluster. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-clusters) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka and Zookeeper + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. 
This table shows the available protocols and the security they provide:
+
+| Method    | Authentication               | Encryption via TLS |
+|-----------|------------------------------|--------------------|
+| plaintext | None                         | No                 |
+| tls       | None                         | Yes                |
+| mtls      | Yes (two-way authentication) | Yes                |
+| sasl      | Yes (via SASL)               | No                 |
+| sasl_tls  | Yes (via SASL)               | Yes                |
+
+Learn more about how to configure Kafka to use the different authentication protocols in the [chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/kafka/administration/enable-security/).
+
+If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below:
+
+- `auth.sasl.jaas.clientUsers`/`auth.sasl.jaas.clientPasswords`: when enabling SASL authentication for communications with clients.
+- `auth.sasl.jaas.interBrokerUser`/`auth.sasl.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications.
+- `auth.sasl.jaas.zookeeperUser`/`auth.sasl.jaas.zookeeperPassword`: when the ZooKeeper chart is deployed with SASL authentication enabled.
+
+In order to configure TLS authentication/encryption, you **can** create a secret per Kafka broker you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, you need to pass the secret names with the `auth.tls.existingSecrets` parameter when deploying the chart.
+
+> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.tls.password` parameter to provide your password.
+
+For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers, use the commands below to create the secrets:
+
+```console
+kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks
+kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks
+```
+
+> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS file generation.
+
+If, for some reason (like using cert-manager), you cannot use the default JKS secret scheme, you can use the additional parameters:
+
+- `auth.tls.jksTruststoreSecret` to define an additional secret where the `kafka.truststore.jks` is kept. The truststore password **must** be the same as in `auth.tls.password`.
+- `auth.tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`).
+- `auth.tls.jksKeystoreSAN` if you want to use a SAN certificate for your brokers. Setting this parameter means that the chart expects an existing key in the `auth.tls.jksTruststoreSecret` with the `auth.tls.jksKeystoreSAN` value and uses it as the keystore for **all** brokers.
+
+> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates.
To handle this, the `auth.tls.pemChainIncluded` property can be set to `true` and the initContainer created by this Chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain) + +> **Note**: The truststore/keystore from above **must** be protected with the same password as in `auth.tls.password` + +You can deploy the chart with authentication using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=tls +auth.tls.existingSecrets[0]=kafka-jks-0 +auth.tls.existingSecrets[1]=kafka-jks-1 +auth.tls.password=jksPassword +auth.sasl.jaas.clientUsers[0]=brokerUser +auth.sasl.jaas.clientPasswords[0]=brokerPassword +auth.sasl.jaas.zookeeperUser=zookeeperUser +auth.sasl.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +``` + +You can deploy the chart with AclAuthorizer using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=sasl_tls +auth.tls.existingSecrets[0]=kafka-jks-0 +auth.tls.existingSecrets[1]=kafka-jks-1 +auth.tls.password=jksPassword +auth.sasl.jaas.clientUsers[0]=brokerUser +auth.sasl.jaas.clientPasswords[0]=brokerPassword +auth.sasl.jaas.zookeeperUser=zookeeperUser +auth.sasl.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +authorizerClassName=kafka.security.authorizer.AclAuthorizer +allowEveryoneIfNoAclFound=false +superUsers=User:admin +``` + +If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificated used to sign the brokers certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags: + +```console +metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} +``` + +### Accessing Kafka brokers from outside the cluster + +In order to access Kafka Brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per kafka pod will be created. + +There are two ways of configuring external access. Using LoadBalancer services or using NodePort services. + +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discover them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.ports.external=9094 +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. 
+
+- Option B) Manually specify the load balancer IPs:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.ports.external=9094
+externalAccess.service.loadBalancerIPs[0]='external-ip-1'
+externalAccess.service.loadBalancerIPs[1]='external-ip-2'
+```
+
+Note: You need to know the load balancer IPs in advance so that each Kafka broker's advertised listener can be configured with them.
+
+Following the aforementioned steps will also allow you to connect to the brokers from outside the cluster using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections.
+
+#### Using NodePort services
+
+You have two alternatives to use NodePort services:
+
+- Option A) Use random node ports using an **initContainer** that discovers them automatically.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the node ports:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.service.nodePorts[0]='node-port-1'
+externalAccess.service.nodePorts[1]='node-port-2'
+```
+
+Note: You need to know in advance the node ports that will be exposed so that each Kafka broker's advertised listener can be configured with them.
+
+The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` or `externalAccess.service.useHostIPs` is provided.
+
+#### Name resolution with External-DNS
+
+You can use the following values to generate External-DNS annotations, which automatically create DNS records for each ReplicaSet pod:
+
+```yaml
+externalAccess:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com"
+```
+
+### Sidecars
+
+If you need additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+### Deploying extra resources
+
+There are cases where you may want to deploy extra objects, such as Kafka Connect. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB®: + +```yaml +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: + - | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "kafka.fullname" . }}-connect + - | + apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - | + apiVersion: v1 + kind: Service + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB® Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties +``` + +## Persistence + +The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence. + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. 
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 15.0.0
+
+This major release bumps the Kafka major version to the `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. Some affected values are:
+
+- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map.
+- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map.
+- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- Several parameters marked as deprecated in `14.x.x` are no longer supported.
+
+Additionally, it updates the ZooKeeper subchart to its newest major version, `8.0.0`, which contains similar changes.
+
+### To 14.0.0
+
+In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one:
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+```
+
+VS
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+...
+provisioning:
+  image:
+    registry: docker.io
+    repository: bitnami/kafka
+    tag: 2.8.0
+```
+
+See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes.
+
+### To 13.0.0
+
+This major version updates the Zookeeper subchart to its newest major version, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major version, please refer to the [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/master/bitnami/zookeeper#to-700).
+
+### To 12.2.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 11.8.0
+
+External access to brokers can now be achieved through the cluster's Kafka service.
+
+- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external`
+
+### To 11.7.0
+
+The way users and passwords are configured has changed. It is now possible to create multiple users during the installation by providing a list of users and passwords.
+
+- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
+- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
+
+### To 11.0.0
+
+The way listeners and authentication are configured on Kafka has been completely refactored, allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information.
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or removed in favor of new ones in this major version:
+
+- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
+- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
+- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
+- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`
+- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`
+- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`
+- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`
+- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`
+- `service.sslPort` -> deprecated in favor of `service.internalPort`
+- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`
+- `metrics.kafka.extraFlag` -> new parameter
+- `metrics.kafka.certificatesSecret` -> new parameter
+
+### To 10.0.0
+
+If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`.
In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
+
+### To 9.0.0
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed in this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
+- securityContext.fsGroup
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+
+### To 7.0.0
+
+Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
+Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
+
+```console
+helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false
+helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true
+```
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/deployment/helm/milvus/charts/kafka/charts/common/.helmignore b/deployment/helm/milvus/charts/kafka/charts/common/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/deployment/helm/milvus/charts/kafka/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/milvus/charts/kafka/charts/common/Chart.yaml b/deployment/helm/milvus/charts/kafka/charts/common/Chart.yaml new file mode 100644 index 000000000..ae5cd04cf --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.12.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.12.0 diff --git a/deployment/helm/milvus/charts/kafka/charts/common/README.md b/deployment/helm/milvus/charts/kafka/charts/common/README.md new file mode 100644 index 000000000..0b3101448 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/README.md @@ -0,0 +1,346 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
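+
+Each helper in the tables below is invoked with Helm's `include` function, passing either the chart context (`.` or `$`) or a `dict` of arguments as described in the *Expected Input* column. As a minimal sketch (the `broker` component label and the placement of the fragment inside a pod spec are illustrative), a template could render a soft pod anti-affinity with:
+
+```yaml
+# Fragment of a chart template; renders the preset soft anti-affinity rules
+affinity:
+  podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "component" "broker" "context" $) | nindent 4 }}
+```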
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|--------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. 
| `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. 
| `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_affinities.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..189ea403d --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_capabilities.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..b94212bbe --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_capabilities.tpl @@ -0,0 +1,128 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_errors.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..a79cc2e32 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_images.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_images.tpl new file mode 100644 index 000000000..42ffbc722 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_ingress.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..8caf73a61 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_labels.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_names.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_names.tpl new file mode 100644 index 000000000..c8574d175 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_secrets.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a53fb44f7 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_storage.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_tplvalues.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_utils.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..ea083a249 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/_warnings.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_cassandra.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_mariadb.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_mongodb.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..a071ea4d3 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_postgresql.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_redis.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..5d72959b9 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_validations.tpl b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/common/values.yaml b/deployment/helm/milvus/charts/kafka/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/.helmignore b/deployment/helm/milvus/charts/kafka/charts/zookeeper/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/Chart.lock b/deployment/helm/milvus/charts/kafka/charts/zookeeper/Chart.lock new file mode 100644 index 000000000..64aa8f5b8 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.12.0 +digest: sha256:7e484480451778c273e7a165dbfaa5594ec1c9a63a114ce9d458626cadd28893 +generated: "2022-03-16T15:37:48.808974487Z" diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/Chart.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/Chart.yaml new file mode 100644 index 000000000..48137ff20 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.7.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. +home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png +keywords: +- zookeeper +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: zookeeper +sources: +- https://github.com/bitnami/bitnami-docker-zookeeper +- https://zookeeper.apache.org/ +version: 8.1.2 diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/README.md b/deployment/helm/milvus/charts/kafka/charts/zookeeper/README.md new file mode 100644 index 000000000..afa3b0b14 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,497 @@ + + +# Apache ZooKeeper packaged by Bitnami + +Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications. + +[Overview of Apache ZooKeeper](https://zookeeper.apache.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
+ +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `namespaceOverride` | Override namespace for ZooKeeper resources | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| --------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.7.0-debian-10-r265` | +| 
`image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enabled` | Enable ZooKeeper auth. It uses SASL/Digest-MD5 | `false` | +| `auth.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `auth.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `auth.serverUsers` | Comma, semicolon or whitespace separated list of user to be created | `""` | +| `auth.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `preAllocSize` | Block size for transaction log file | `65536` | +| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | +| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | +| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | +| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | +| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | +| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | +| `autopurge.snapRetainCount` | The most recent snapshots amount (and corresponding transaction logs) to retain | `3` | +| `autopurge.purgeInterval` | The time interval (in hours) for which the purge task has to be triggered | `0` | +| `logLevel` | Log level for the ZooKeeper server. 
ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | 
+| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` | +| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` | +| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` | +| `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled ZooKeeper containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` | +| `hostAliases` | ZooKeeper pods host aliases | `[]` | +| `podLabels` | Extra labels for ZooKeeper pods | `{}` | +| `podAnnotations` | Annotations for ZooKeeper pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `{}` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | ZooKeeper client service port | `2181` | +| `service.ports.tls` | ZooKeeper TLS service port | `3181` | +| `service.ports.follower` | ZooKeeper follower service port | `2888` | +| `service.ports.election` | ZooKeeper election service port | `3888` | +| `service.nodePorts.client` | Node port for clients | `""` | +| `service.nodePorts.tls` | Node port for TLS | `""` | +| `service.disableBaseClientPort` | Remove client port from service definitions. | `false` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.clusterIP` | ZooKeeper service Cluster IP | `""` | +| `service.loadBalancerIP` | ZooKeeper service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ZooKeeper service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ZooKeeper service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ZooKeeper service | `{}` | +| `service.extraPorts` | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for ZooKeeper pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- | +| `persistence.enabled` | Enable ZooKeeper data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use (only when deploying a single replica) | `""` | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `""` | +| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` | +| `persistence.dataLogDir.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r312` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- | +| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` | +| `metrics.containerPort` | ZooKeeper Prometheus Exporter container port | `9141` | +| `metrics.service.type` | ZooKeeper Prometheus Exporter service type | `ClusterIP` | +| `metrics.service.port` | ZooKeeper Prometheus Exporter service port | `9141` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the 
ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + + +### TLS/SSL parameters + +| Name | Description | Value | +| -------------------------------- | ----------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `tls.client.enabled` | Enable TLS for client connections | `false` | +| `tls.client.autoGenerated` | Generate automatically self-signed TLS certificates for ZooKeeper client communications | `false` | +| `tls.client.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications | `""` | +| `tls.client.keystorePath` | Location of the KeyStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks` | +| `tls.client.truststorePath` | Location of the TrustStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` | +| `tls.client.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.client.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.client.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.quorum.enabled` | Enable TLS for quorum protocol | `false` | +| `tls.quorum.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. 
| `false` | +| `tls.quorum.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol | `""` | +| `tls.quorum.keystorePath` | Location of the KeyStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks` | +| `tls.quorum.truststorePath` | Location of the TrustStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` | +| `tls.quorum.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.resources.limits` | The resources limits for the TLS init container | `{}` | +| `tls.resources.requests` | The requested resources for the TLS init container | `{}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set auth.clientUser=newUser \ + bitnami/zookeeper +``` + +The above command sets the ZooKeeper user to `newUser`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/zookeeper +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Configure log level + +You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs. + +In order to remove that log noise so levels can be set to 'INFO', two changes must be made. + +First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`. + +Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. 
The following is an example of this use of the Admin Server for probes: + +``` +livenessProbe: + enabled: false +readinessProbe: + enabled: false +customLivenessProbe: + exec: + command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok'] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 +customReadinessProbe: + exec: + command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null'] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 +``` + +You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, set inside conf/log4j.properties as zookeeper.root.logger, which defaults to + +```console +zookeeper.root.logger=INFO, CONSOLE +``` +The available appenders are + +- CONSOLE +- ROLLINGFILE +- RFAAUDIT +- TRACEFILE + +## Persistence + +The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +### Configure the data log directory + +You can use a dedicated device for logs (instead of using the data directory) to help avoid contention between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior). + +When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information. + +### Set pod affinity + +This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
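Putting the persistence, data log directory, and affinity options above together, a values file for this chart might look like the following minimal sketch (the `dataLogDir` path and the `soft` preset value are illustrative choices, not defaults):

```yaml
logLevel: INFO                 # safe to raise from ERROR once the probes target the Admin Server
metrics:
  enabled: true                # expose the ZooKeeper metrics server instead of polling 'mntr'
volumePermissions:
  enabled: true                # initContainer that adjusts ownership of the persistent volume
persistence:
  enabled: true                # keep data (and the dedicated transaction log, if any) in PVCs
dataLogDir: /bitnami/zookeeper/dataLog   # dedicated path for transaction logs; "" writes to the data dir
podAntiAffinityPreset: soft    # affinity preset provided by the bitnami/common chart
```

As shown earlier, such a file can be applied with `helm install my-release -f values.yaml bitnami/zookeeper`.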
+ +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 8.0.0 + +This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. + +Affected values: + +- `allowAnonymousLogin` is deprecated. +- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map. +- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map. +- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map. +- `podDisruptionBudget.*` parameters are renamed to `pdb.*`. + +### To 7.0.0 + +This new version renames the parameters used to configure TLS for both client and quorum. + +- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort` +- `service.tls.client_port` is renamed to `service.tlsClientPort` +- `service.tls.client_enable` is renamed to `tls.client.enabled` +- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath` +- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath` +- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword` +- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword` +- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled` +- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath` +- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath` +- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword` +- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword` + +### To 6.1.0 + +This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade. + +### To 6.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/). + +### To 5.21.0 + +A couple of parameters related to ZooKeeper metrics were renamed or removed in favor of new ones: + +- `metrics.port` is renamed to `metrics.containerPort`. +- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`. + +### To 3.0.0 + +This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade +of the application, each node will need to have at least one snapshot file created in the data directory. If not, the +new version of the application won't be able to start the service.
Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056) +in order to find ways to workaround this issue in case you are facing it. + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is zookeeper: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/.helmignore b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/Chart.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/Chart.yaml new file mode 100644 index 000000000..ae5cd04cf --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.12.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.12.0 diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/README.md b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/README.md new file mode 100644 index 000000000..0b3101448 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/README.md @@ -0,0 +1,346 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. 
| `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use 
on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|--------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. 
| + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
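As a quick illustration of how these helpers compose, a template in a consuming chart could look like the following sketch (the file name, container name, and `.Values.image` layout are assumptions; the image values follow the [ImageRoot](#imageroot) schema above):

```yaml
# templates/deployment.yaml (hypothetical consuming chart)
apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
kind: Deployment
metadata:
  name: {{ include "common.names.fullname" . }}
  namespace: {{ include "common.names.namespace" . }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels: {{- include "common.labels.standard" . | nindent 8 }}
    spec:
      containers:
        - name: main
          image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
```

Each `include` receives the input listed in the helper tables above; for example, `common.images.image` falls back to the plain `repository:tag` form when no registry is configured.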
diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..189ea403d --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..b94212bbe --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl @@ -0,0 +1,128 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..a79cc2e32 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl new file mode 100644 index 000000000..42ffbc722 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..8caf73a61 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl new file mode 100644 index 000000000..c8574d175 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a53fb44f7 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. 
Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..ea083a249 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..a071ea4d3 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..5d72959b9 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis™ required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/values.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/ci/values-with-auth-tls-and-metrics.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/ci/values-with-auth-tls-and-metrics.yaml new file mode 100644 index 000000000..c43be2eda --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/ci/values-with-auth-tls-and-metrics.yaml @@ -0,0 +1,20 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct +replicaCount: 3 +auth: + enabled: true + clientUser: foo + clientPassword: baz + serverUsers: foo,bar + serverPasswords: baz,qux +metrics: + enabled: true + serviceMonitor: + enabled: true +tls: + client: + enabled: true + autoGenerated: true + quorum: + enabled: true + autoGenerated: true diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/NOTES.txt b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/NOTES.txt new file mode 100644 index 000000000..012af3bf9 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/NOTES.txt @@ -0,0 +1,78 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.auth.clientPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true" + you have most likely exposed the ZooKeeper service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "auth.clientPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh + +{{- else }} + +ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }} + +To connect to your ZooKeeper server run the following commands: + + export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + kubectl exec -it $POD_NAME -- zkCli.sh + +To connect to your ZooKeeper server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + zkCli.sh $NODE_IP:$NODE_PORT + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPort }} & + zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }} + +{{- end }} +{{- end }} + +{{- include "zookeeper.validateValues" . }} +{{- include "zookeeper.checkRollingTags" . 
}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100644 index 000000000..e9c63c330 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,263 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ZooKeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "zookeeper.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Return ZooKeeper Namespace to use +*/}} +{{- define "zookeeper.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper authentication credentials secret +*/}} +{{- define "zookeeper.secretName" -}} +{{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper authentication credentials secret object should be created +*/}} +{{- define "zookeeper.createSecret" -}} +{{- if and .Values.auth.enabled (empty .Values.auth.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return ZooKeeper client password +*/}} +{{- define "zookeeper.client.password" -}} +{{- if not (empty .Values.auth.clientPassword) -}} + {{- .Values.auth.clientPassword -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" (include "zookeeper.namespace" .) 
"Name" (printf "%s-auth" (include "common.names.fullname" .)) "Length" 10 "Key" "client-password") -}} +{{- end -}} +{{- end -}} + +{{/* +Return ZooKeeper server password +*/}} +{{- define "zookeeper.server.password" -}} +{{- if not (empty .Values.auth.serverPasswords) -}} + {{- .Values.auth.serverPasswords -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" (include "zookeeper.namespace" .) "Name" (printf "%s-auth" (include "common.names.fullname" .)) "Length" 10 "Key" "server-password") -}} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper configuration ConfigMap name +*/}} +{{- define "zookeeper.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ConfigMap object should be created for ZooKeeper configuration +*/}} +{{- define "zookeeper.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsSecret" -}} +{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper quorum TLS certificates +*/}} +{{- define "zookeeper.quorum.tlsSecretName" -}} +{{- $secretName := .Values.tls.quorum.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.quorum.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.quorum.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsSecret" -}} +{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper client TLS certificates +*/}} +{{- define "zookeeper.client.tlsSecretName" -}} +{{- $secretName := .Values.tls.client.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-crt" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.client.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.client.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "zookeeper.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "zookeeper.validateValues.auth" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Authentication enabled +*/}} +{{- define "zookeeper.validateValues.auth" -}} +{{- if and .Values.auth.enabled (or (not .Values.auth.clientUser) (not .Values.auth.serverUsers)) }} +zookeeper: auth.enabled + In order to enable authentication, you need to provide the list + of users to be created and the user to use for clients access. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Client TLS enabled +*/}} +{{- define "zookeeper.validateValues.client.tls" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }} +zookeeper: tls.client.enabled + In order to enable Client TLS encryption, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Quorum TLS enabled +*/}} +{{- define "zookeeper.validateValues.quorum.tls" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }} +zookeeper: tls.quorum.enabled + In order to enable Quorum TLS, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/configmap.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100644 index 000000000..12b4f489f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "zookeeper.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/extra-list.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100644 index 000000000..5afc4b3e5 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }}-metrics + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100644 index 000000000..63532832c --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,41 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound connections to ZooKeeper + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.containerPorts.follower }} + - port: {{ .Values.containerPorts.election }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/pdb.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/pdb.yaml new file mode 100644 index 000000000..f7faf65f9 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml new file mode 100644 index 000000000..87dcd3565 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} + diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml new file mode 100644 index 000000000..ff7148dd3 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml @@ -0,0 +1,98 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + init-certs.sh: |- + #!/bin/bash + + {{- if .Values.tls.client.enabled }} + if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then + openssl pkcs12 -export -in "/certs/client/tls.crt" \ + -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -inkey "/certs/client/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/client/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \ + -noprompt + {{- if .Values.tls.client.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled." + exit 1 + fi + {{- else }} + elif [[ -f "/certs/client/zookeeper.truststore.jks" ]] && [[ -f "/certs/client/zookeeper.keystore.jks" ]]; then + cp "/certs/client/zookeeper.truststore.jks" "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" + cp "/certs/client/zookeeper.keystore.jks" "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled." 
+ exit 1 + fi + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then + openssl pkcs12 -export -in "/certs/quorum/tls.crt" \ + -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -inkey "/certs/quorum/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/quorum/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \ + -noprompt + {{- if .Values.tls.quorum.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when encryption Quorum via TLS is enabled." + exit 1 + fi + {{- else }} + elif [[ -f "/certs/quorum/zookeeper.truststore.jks" ]] && [[ -f "/certs/quorum/zookeeper.keystore.jks" ]]; then + cp "/certs/quorum/zookeeper.truststore.jks" "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" + cp "/certs/quorum/zookeeper.keystore.jks" "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled." + exit 1 + fi + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID + # check ZOO_SERVER_ID in persistent volume via myid + # if not present, set based on POD hostname + if [[ -f "/bitnami/zookeeper/data/myid" ]]; then + export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)" + else + HOSTNAME="$(hostname -s)" + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))" + else + echo "Failed to get index from hostname $HOST" + exit 1 + fi + fi + exec /entrypoint.sh /run.sh diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/secrets.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/secrets.yaml new file mode 100644 index 000000000..cbf358828 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -0,0 +1,58 @@ +{{- if (include "zookeeper.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-auth" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + client-password: {{ include "zookeeper.client.password" . | b64enc | quote }} + server-password: {{ include "zookeeper.server.password" . | b64enc | quote }} +--- +{{- end }} +{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-client-tls-pass + namespace: {{ template "zookeeper.namespace" . 
}} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }} +--- +{{- end }} +{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-quorum-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }} +--- +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100644 index 000000000..958a57ac2 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100644 index 000000000..2c8af3350 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . 
}} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "zookeeper.namespace" . }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100644 index 000000000..a960b5ce4 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,520 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "zookeeper.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }} + checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . 
}} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/zookeeper + find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if .Values.dataLogDir }} + mkdir -p {{ .Values.dataLogDir }} + find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + {{- if .Values.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }} + - name: init-certs + image: {{ include "zookeeper.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /scripts/init-certs.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: "keystore-password" + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: "truststore-password" + {{- end }} + {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: "keystore-password" + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: "truststore-password" + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/init-certs.sh + subPath: init-certs.sh + {{- if or .Values.tls.client.enabled }} + - name: client-certificates + mountPath: /certs/client + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + {{- end }} + {{- if or .Values.tls.quorum.enabled }} + - name: quorum-certificates + mountPath: /certs/quorum + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: zookeeper + image: {{ template "zookeeper.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ZOO_DATA_LOG_DIR + value: {{ .Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.containerPorts.client | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_PRE_ALLOC_SIZE + value: {{ .Values.preAllocSize | quote }} + - name: ZOO_SNAPCOUNT + value: {{ .Values.snapCount | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $minServerId := int .Values.minServerId }} + {{- $followerPort := int .Values.containerPorts.follower }} + {{- $electionPort := int .Values.containerPorts.election }} + {{- $releaseNamespace := include "zookeeper.namespace" . }} + {{- $zookeeperFullname := include "common.names.fullname" . }} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.secretName" . }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.secretName" . 
}} + key: server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + {{- if .Values.log4jProp }} + - name: ZOO_LOG4J_PROP + value: {{ .Values.log4jProp | quote }} + {{- end }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.tls.client.enabled | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.tls.client.keystorePath | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.tls.client.truststorePath | quote }} + {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: "keystore-password" + {{- end }} + {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: "truststore-password" + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.tls.quorum.enabled | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.tls.quorum.keystorePath | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.tls.quorum.truststorePath | quote }} + {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: "keystore-password" + {{- end }} + {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . 
}} + key: "truststore-password" + {{- end }} + {{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: client + containerPort: {{ .Values.containerPorts.client }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-tls + containerPort: {{ .Values.containerPorts.tls }} + {{- end }} + - name: follower + containerPort: {{ .Values.containerPorts.follower }} + - name: election + containerPort: {{ .Values.containerPorts.election }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ 
.Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + {{- if not .Values.service.disableBaseClientPort }} + port: client + {{- else }} + port: follower + {{- end }} + {{- else if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + readOnly: true + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "zookeeper.configmapName" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-certificates + secret: + secretName: {{ include "zookeeper.client.tlsSecretName" . }} + defaultMode: 256 + - name: client-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-certificates + secret: + secretName: {{ include "zookeeper.quorum.tlsSecretName" . 
}} + defaultMode: 256 + - name: quorum-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.dataLogDir.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100644 index 000000000..dff53ebb3 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/svc.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/svc.yaml new file mode 100644 index 000000000..76f461624 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{ end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }} + nodePort: {{ .Values.service.nodePorts.tls }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml new file mode 100644 index 000000000..92e585987 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml @@ -0,0 +1,56 @@ +{{- if (include "zookeeper.client.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-client-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-client-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +--- +{{- end }} +{{- if (include "zookeeper.quorum.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-quorum-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-quorum-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +--- +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/charts/zookeeper/values.yaml b/deployment/helm/milvus/charts/kafka/charts/zookeeper/values.yaml new file mode 100644 index 000000000..a5ddb0b95 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,807 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Extra objects to deploy (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param namespaceOverride Override namespace for ZooKeeper resources +## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent +## +namespaceOverride: "" + +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section ZooKeeper chart parameters + +## Bitnami ZooKeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## @param image.registry ZooKeeper image registry +## @param image.repository ZooKeeper image repository +## @param image.tag ZooKeeper image tag (immutable tags are recommended) +## @param image.pullPolicy ZooKeeper image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.7.0-debian-10-r320 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## +auth: + ## @param auth.enabled Enable ZooKeeper auth. 
It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.clientUser User that will use ZooKeeper clients to auth + ## + clientUser: "" + ## @param auth.clientPassword Password that will use ZooKeeper clients to auth + ## + clientPassword: "" + ## @param auth.serverUsers Comma, semicolon or whitespace separated list of user to be created + ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" +## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats +## +tickTime: 2000 +## @param initLimit ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 +## @param syncLimit How far out of date a server can be from a leader +## +syncLimit: 5 +## @param preAllocSize Block size for transaction log file +## +preAllocSize: 65536 +## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) +## +snapCount: 100000 +## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 +## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate +## Defaults to 20 times the tickTime +## +maxSessionTimeout: 40000 +## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms) +## This env var is ignored if Xmx an Xms are configured via `jvmFlags` +## +heapSize: 1024 +## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed +## +fourlwCommandsWhitelist: srvr, mntr, ruok +## @param minServerId Minimal SERVER_ID value, nodes increment their IDs respectively +## Servers increment their ID starting at this minimal value. +## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively. +## +minServerId: 1 +## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses +## +listenOnAllIPs: false +## Ongoing data directory cleanup configuration +## +autopurge: + ## @param autopurge.snapRetainCount The most recent snapshots amount (and corresponding transaction logs) to retain + ## + snapRetainCount: 3 + ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered + ## Set to a positive integer to enable the auto purging + ## + purgeInterval: 0 +## @param logLevel Log level for the ZooKeeper server. ERROR by default +## Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs +## +logLevel: ERROR +## @param jvmFlags Default JVM flags for the ZooKeeper process +## +jvmFlags: "" +## @param dataLogDir Dedicated data log directory +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots. +## E.g. +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" +## @param configuration Configure ZooKeeper with a custom zoo.cfg file +## e.g: +## configuration: |- +## deploy-working-dir=/bitnami/geode/data +## log-level=info +## ... 
+## +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set the `configuration` parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes +## +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] + +## @section Statefulset parameters + +## @param replicaCount Number of ZooKeeper nodes +## +replicaCount: 1 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port +## +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + 
failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks for the ZooKeeper container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## ZooKeeper resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the ZooKeeper containers +## @param resources.requests.memory The requested memory for the ZooKeeper containers +## @param resources.requests.cpu The requested cpu for the ZooKeeper containers +## +resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m +## Configure Pods Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled ZooKeeper pods' Security Context +## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Configure Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enabled ZooKeeper containers' Security Context +## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## @param hostAliases ZooKeeper pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param podLabels Extra labels for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Kubernetes pod scheduler registry +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type ZooKeeper statefulset strategy type +## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumes: +## - name: zookeeper-keystore +## secret: +## defaultMode: 288 +## secretName: zookeeper-keystore +## - name: zookeeper-truststore +## secret: +## defaultMode: 288 +## secretName: zookeeper-truststore +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumeMounts: +## - name: zookeeper-keystore +## mountPath: /certs/keystore +## readOnly: true +## - name: zookeeper-truststore +## mountPath: /certs/truststore +## readOnly: true +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the ZooKeeper pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## ZooKeeper Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the 
ZooKeeper pod +## @param pdb.minAvailable Minimum available ZooKeeper replicas +## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters + +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client ZooKeeper client service port + ## @param service.ports.tls ZooKeeper TLS service port + ## @param service.ports.follower ZooKeeper follower service port + ## @param service.ports.election ZooKeeper election service port + ## + ports: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param service.nodePorts.client Node port for clients + ## @param service.nodePorts.tls Node port for TLS + ## + nodePorts: + client: "" + tls: "" + ## @param service.disableBaseClientPort Remove client port from service definitions. + ## + disableBaseClientPort: false + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.clusterIP ZooKeeper service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for ZooKeeper service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.headless.annotations Annotations for the Headless Service + ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods + ## + headless: + publishNotReadyAddresses: true + annotations: {} +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port Redis™ is + ## listening on. When true, zookeeper accept connections from any source (with the correct destination port). + ## + allowExternal: true + +## @section Other Parameters + +## Service account for ZooKeeper to use. 
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica) + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for ZooKeeper data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## Persistence for a dedicated data log directory + ## + dataLogDir: + ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory + ## + size: 8Gi + ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. 
+ ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r367 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Metrics parameters +## + +## ZooKeeper Prometheus Exporter configuration +## +metrics: + ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint + ## + enabled: false + ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port + ## + containerPort: 9141 + ## Service configuration + ## + service: + ## @param metrics.service.type ZooKeeper Prometheus Exporter service type + ## + type: ClusterIP + ## @param metrics.service.port ZooKeeper Prometheus Exporter service port + ## + port: 9141 + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## - alert: ZooKeeperSyncedFollowers + ## annotations: + ## message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + ## expr: max(synced_followers{service="my-release-metrics"}) < 2 + ## for: 5m + ## labels: + ## severity: critical + ## - alert: ZooKeeperOutstandingRequests + ## annotations: + ## message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. 
+ ## expr: outstanding_requests{service="my-release-metrics"} > 10 + ## for: 5m + ## labels: + ## severity: critical + ## + rules: [] + +## @section TLS/SSL parameters +## + +## Enable SSL/TLS encryption +## +tls: + client: + ## @param tls.client.enabled Enable TLS for client connections + ## + enabled: false + ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications + ## Currently only supports PEM certificates + ## + autoGenerated: false + ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications + ## + existingSecret: "" + ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.client.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.client.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + quorum: + ## @param tls.quorum.enabled Enable TLS for quorum protocol + ## + enabled: false + ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol + ## + existingSecret: "" + ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.quorum.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.quorum.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param tls.resources.limits The resources limits for the TLS init container + ## @param tls.resources.requests The requested resources for the TLS init container + ## + resources: + limits: {} + requests: {} diff --git a/deployment/helm/milvus/charts/kafka/ci/values-with-external-tls.yaml b/deployment/helm/milvus/charts/kafka/ci/values-with-external-tls.yaml new file mode 100644 index 000000000..ec478cbe6 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/ci/values-with-external-tls.yaml @@ -0,0 +1,15 @@ +# Test values file for generating hidden section of the yaml +# and check that the rendering is correct +replicaCount: 3 +auth: + externalClientProtocol: tls + tls: + type: jks + existingSecrets: + - kafka-secret-0 + - kafka-secret-1 + - kafka-secret-2 +externalAccess: + enabled: true + autoDiscovery: + enabled: true diff --git a/deployment/helm/milvus/charts/kafka/ci/values-with-metrics.yaml 
b/deployment/helm/milvus/charts/kafka/ci/values-with-metrics.yaml new file mode 100644 index 000000000..4e02b9ec0 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/ci/values-with-metrics.yaml @@ -0,0 +1,11 @@ +# Test values file for generating hidden section of the yaml +# and check that the rendering is correct +replicaCount: 3 +metrics: + kafka: + enabled: true + jmx: + enabled: true + serviceMonitor: + enabled: true + namespace: monitoring diff --git a/deployment/helm/milvus/charts/kafka/ci/values-with-sasl-auth.yaml b/deployment/helm/milvus/charts/kafka/ci/values-with-sasl-auth.yaml new file mode 100644 index 000000000..9896fc4b4 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/ci/values-with-sasl-auth.yaml @@ -0,0 +1,27 @@ +# Test values file for generating hidden section of the yaml +# and check that the rendering is correct +replicaCount: 3 +auth: + clientProtocol: sasl + interBrokerProtocol: sasl + sasl: + mechanisms: plain,scram-sha-256,scram-sha-512 + interBrokerMechanism: plain + jaas: + clientUsers: + - user1 + - user2 + clientPasswords: + - password1 + - password2 + interBrokerUser: admin + interBrokerPassword: adminpassword + zookeeperUser: zoouser + zookeeperPassword: zoopassword +zookeeper: + auth: + enabled: true + clientUser: zoouser + clientPassword: zoopassword + serverUsers: zoouser + serverPasswords: zoopassword diff --git a/deployment/helm/milvus/charts/kafka/ci/values-with-tls-jks-auth.yaml b/deployment/helm/milvus/charts/kafka/ci/values-with-tls-jks-auth.yaml new file mode 100644 index 000000000..71c4f7747 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/ci/values-with-tls-jks-auth.yaml @@ -0,0 +1,12 @@ +# Test values file for generating hidden section of the yaml +# and check that the rendering is correct +replicaCount: 3 +auth: + clientProtocol: tls + interBrokerProtocol: mtls + tls: + type: jks + existingSecrets: + - kafka-secret-0 + - kafka-secret-1 + - kafka-secret-2 diff --git a/deployment/helm/milvus/charts/kafka/ci/values-with-tls-pem-auth.yaml b/deployment/helm/milvus/charts/kafka/ci/values-with-tls-pem-auth.yaml new file mode 100644 index 000000000..76459a1c9 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/ci/values-with-tls-pem-auth.yaml @@ -0,0 +1,9 @@ +# Test values file for generating hidden section of the yaml +# and check that the rendering is correct +replicaCount: 3 +auth: + clientProtocol: tls + interBrokerProtocol: mtls + tls: + type: pem + autoGenerated: true diff --git a/deployment/helm/milvus/charts/kafka/templates/NOTES.txt b/deployment/helm/milvus/charts/kafka/templates/NOTES.txt new file mode 100644 index 000000000..d4d94b7bc --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/NOTES.txt @@ -0,0 +1,304 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh + +{{- else }} + +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms -}} +{{- $tlsEndpointIdentificationAlgorithm := default "" .Values.auth.tls.endpointIdentificationAlgorithm -}} +{{- $tlsPasswordSecret := printf "$(kubectl get secret %s --namespace %s -o jsonpath='{.data.password}' | base64 --decode | cut -d , -f 1)" .Values.auth.tls.existingSecret $releaseNamespace -}} +{{- $tlsPassword := ternary .Values.auth.tls.password $tlsPasswordSecret (eq .Values.auth.tls.existingSecret "") -}} +{{- $servicePort := int .Values.service.ports.client -}} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $externalClientProtocol "PLAINTEXT") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As an alternative, you can also configure the Kafka authentication. + +--------------------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication.
To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 --decode | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $clientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $clientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.client.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $clientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.clientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} + +{{- end }} + +To create a pod that you can use as a Kafka client run the following commands: + + kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . }} --namespace {{ $releaseNamespace }} --command -- sleep infinity + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/kafka_jaas.conf {{ $fullname }}-client:/tmp/kafka_jaas.conf + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) 
(eq .Values.auth.tls.type "jks") }} + kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks + {{- if eq .Values.auth.clientProtocol "mtls" }} + kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks + {{- end }} + {{- end }} + kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash + {{- if (include "kafka.client.saslAuthentication" .) }} + export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/kafka_jaas.conf" + {{- end }} + + PRODUCER: + kafka-console-producer.sh \ + {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--producer.config /tmp/client.properties \{{ end }} + --broker-list {{ join "," $brokerList }} \ + --topic test + + CONSUMER: + kafka-console-consumer.sh \ + {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--consumer.config /tmp/client.properties \{{ end }} + --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \ + --topic test \ + --from-beginning + +{{- if .Values.externalAccess.enabled }} + +To connect to your Kafka server from outside the cluster, follow the instructions below: + +{{- if eq "NodePort" .Values.externalAccess.service.type }} +{{- if .Values.externalAccess.service.domain }} + + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }} + +{{- else }} + + Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener) + + 1. Obtain the pod name: + + kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka" + + 2. Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners + +{{- end }} + + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w' + + Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + Kafka Brokers port: {{ .Values.externalAccess.service.ports.external }} + +{{- end }} + +{{- if not (eq $clientProtocol $externalClientProtocol) }} +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 --decode | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $externalClientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $externalClientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.externalClient.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $externalClientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.externalClientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} + +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- include "kafka.checkRollingTags" . }} +{{- include "kafka.validateValues" . 
}} diff --git a/deployment/helm/milvus/charts/kafka/templates/_helpers.tpl b/deployment/helm/milvus/charts/kafka/templates/_helpers.tpl new file mode 100644 index 000000000..9e52d79b5 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/_helpers.tpl @@ -0,0 +1,474 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kafka.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified zookeeper name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kafka.zookeeper.fullname" -}} +{{- if .Values.zookeeper.fullnameOverride -}} +{{- .Values.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "kafka.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Kafka image name +*/}} +{{- define "kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "kafka.externalAccess.autoDiscovery.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "kafka.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create a default fully qualified Kafka exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kafka.metrics.kafka.fullname" -}} + {{- printf "%s-exporter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* + Create the name of the service account to use for Kafka exporter pods + */}} +{{- define "kafka.metrics.kafka.serviceAccountName" -}} +{{- if .Values.metrics.kafka.serviceAccount.create -}} + {{ default (include "kafka.metrics.kafka.fullname" .) 
.Values.metrics.kafka.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.metrics.kafka.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Kafka exporter image name +*/}} +{{- define "kafka.metrics.kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.kafka.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper JMX exporter image name +*/}} +{{- define "kafka.metrics.jmx.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.jmx.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "kafka.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.externalAccess.autoDiscovery.image .Values.volumePermissions.image .Values.metrics.kafka.image .Values.metrics.jmx.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "kafka.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if authentication via SASL should be configured for client communications +*/}} +{{- define "kafka.client.saslAuthentication" -}} +{{- $saslProtocols := list "sasl" "sasl_tls" -}} +{{- if has .Values.auth.clientProtocol $saslProtocols -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if authentication via SASL should be configured for inter-broker communications +*/}} +{{- define "kafka.interBroker.saslAuthentication" -}} +{{- $saslProtocols := list "sasl" "sasl_tls" -}} +{{- if has .Values.auth.interBrokerProtocol $saslProtocols -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for client connections should be configured +*/}} +{{- define "kafka.client.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has .Values.auth.clientProtocol $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configured value for the external client protocol, defaults to the same value as clientProtocol +*/}} +{{- define "kafka.externalClientProtocol" -}} + {{- coalesce .Values.auth.externalClientProtocol .Values.auth.clientProtocol -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for external client connections should be configured +*/}} +{{- define "kafka.externalClient.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has (include "kafka.externalClientProtocol" . 
) $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for inter broker communication connections should be configured +*/}} +{{- define "kafka.interBroker.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has .Values.auth.interBrokerProtocol $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS should be configured +*/}} +{{- define "kafka.tlsEncryption" -}} +{{- if or (include "kafka.client.tlsEncryption" .) (include "kafka.interBroker.tlsEncryption" .) (include "kafka.externalClient.tlsEncryption" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the type of listener +Usage: +{{ include "kafka.listenerType" ( dict "protocol" .Values.path.to.the.Value ) }} +*/}} +{{- define "kafka.listenerType" -}} +{{- if eq .protocol "plaintext" -}} +PLAINTEXT +{{- else if or (eq .protocol "tls") (eq .protocol "mtls") -}} +SSL +{{- else if eq .protocol "sasl_tls" -}} +SASL_SSL +{{- else if eq .protocol "sasl" -}} +SASL_PLAINTEXT +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka JAAS credentials secret +*/}} +{{- define "kafka.jaasSecretName" -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-jaas" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a JAAS credentials secret object should be created +*/}} +{{- define "kafka.createJaasSecret" -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} +{{- if and (or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) (and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser)) (empty $secretName) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "kafka.createTlsSecret" -}} +{{- if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (eq .Values.auth.tls.type "pem") .Values.auth.tls.autoGenerated }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the secret name for the Kafka Provisioning client +*/}} +{{- define "kafka.client.passwordsSecretName" -}} +{{- if .Values.provisioning.auth.tls.passwordsSecret -}} + {{- printf "%s" (tpl .Values.provisioning.auth.tls.passwordsSecret $) -}} +{{- else -}} + {{- printf "%s-client-secret" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.createConfigmap" -}} +{{- if and .Values.config (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka log4j ConfigMap name. +*/}} +{{- define "kafka.log4j.configMapName" -}} +{{- if .Values.existingLog4jConfigMap -}} + {{- printf "%s" (tpl .Values.existingLog4jConfigMap $) -}} +{{- else -}} + {{- printf "%s-log4j-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a log4j ConfigMap object should be created. 
+*/}} +{{- define "kafka.log4j.createConfigMap" -}} +{{- if and .Values.log4j (not .Values.existingLog4jConfigMap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the SASL mechanism to use for the Kafka exporter to access Kafka +The exporter uses a different nomenclature so we need to do this hack +*/}} +{{- define "kafka.metrics.kafka.saslMechanism" -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms }} +{{- if contains "scram-sha-512" $saslMechanisms }} + {{- print "scram-sha512" -}} +{{- else if contains "scram-sha-256" $saslMechanisms }} + {{- print "scram-sha256" -}} +{{- else -}} + {{- print "plain" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.metrics.jmx.configmapName" -}} +{{- if .Values.metrics.jmx.existingConfigmap -}} + {{- printf "%s" (tpl .Values.metrics.jmx.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-jmx-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.metrics.jmx.createConfigmap" -}} +{{- if and .Values.metrics.jmx.enabled .Values.metrics.jmx.config (not .Values.metrics.jmx.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "kafka.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "kafka.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "kafka.validateValues.authProtocols" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . )) -}} +{{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets.length" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsPasswords" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - Authentication protocols for Kafka */}} +{{- define "kafka.validateValues.authProtocols" -}} +{{- $authProtocols := list "plaintext" "tls" "mtls" "sasl" "sasl_tls" -}} +{{- if or (not (has .Values.auth.clientProtocol $authProtocols)) (not (has .Values.auth.interBrokerProtocol $authProtocols)) (not (has (include "kafka.externalClientProtocol" . ) $authProtocols)) -}} +kafka: auth.clientProtocol auth.externalClientProtocol auth.interBrokerProtocol + Available authentication protocols are "plaintext", "tls", "mtls", "sasl" and "sasl_tls" +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as NodePort list */}} +{{- define "kafka.validateValues.nodePortListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }} +{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $nodePortListLength )) (eq .Values.externalAccess.service.type "NodePort") -}} +kafka: .Values.externalAccess.service.nodePorts + Number of replicas and nodePort array length must be the same. Currently: replicaCount = {{ $replicaCount }} and nodePorts = {{ $nodePortListLength }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - service type for external access */}} +{{- define "kafka.validateValues.externalAccessServiceType" -}} +{{- if and (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) -}} +kafka: externalAccess.service.type + Available service type for external access are NodePort or LoadBalancer. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - RBAC should be enabled when autoDiscovery is enabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryRBAC" -}} +{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }} +kafka: rbac.create + By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true" + an initContainer will be used to auto-detect the external IPs/ports by querying the + K8s API. Please note this initContainer requires specific RBAC resources. You can create them + by specifying "--set rbac.create=true". 
+{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - LoadBalancerIPs or LoadBalancerNames should be set when autoDiscovery is disabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}} +{{- $loadBalancerNameListLength := len .Values.externalAccess.service.loadBalancerNames -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }} +kafka: externalAccess.service.loadBalancerNames or externalAccess.service.loadBalancerIPs + By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and + "externalAccess.service.type=LoadBalancer" at least one of externalAccess.service.loadBalancerNames + or externalAccess.service.loadBalancerIPs must be set and the length of those arrays must be equal + to the number of replicas. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as loadBalancerIPs list */}} +{{- define "kafka.validateValues.externalAccessServiceList" -}} +{{- $replicaCount := int .context.Values.replicaCount }} +{{- $listLength := len (get .context.Values.externalAccess.service .element) -}} +{{- if and .context.Values.externalAccess.enabled (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }} +kafka: externalAccess.service.{{ .element }} + Number of replicas and {{ .element }} array length must be the same. Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}} +{{- define "kafka.validateValues.saslMechanisms" -}} +{{- if and (or (.Values.auth.clientProtocol | regexFind "sasl") (.Values.auth.interBrokerProtocol | regexFind "sasl") (and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser)) (not .Values.auth.sasl.mechanisms) }} +kafka: auth.sasl.mechanisms + The SASL mechanisms are required when either auth.clientProtocol or auth.interBrokerProtocol uses SASL or a ZooKeeper user is provided. +{{- end }} +{{- if not (contains .Values.auth.sasl.interBrokerMechanism .Values.auth.sasl.mechanisms) }} +kafka: auth.sasl.mechanisms + auth.sasl.interBrokerMechanism must be provided and it should be one of the specified mechanisms at auth.sasl.mechanisms +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}} +{{- define "kafka.validateValues.tlsSecrets" -}} +{{- if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "jks") (empty .Values.auth.tls.existingSecrets) }} +kafka: auth.tls.existingSecrets + A secret containing the Kafka JKS keystores and truststore is required + when TLS encryption is enabled and TLS format is "JKS" +{{- else if and (include "kafka.tlsEncryption" .)
(eq .Values.auth.tls.type "pem") (empty .Values.auth.tls.existingSecrets) (not .Values.auth.tls.autoGenerated) }} +kafka: auth.tls.existingSecrets + A secret containing the Kafka TLS certificates and keys is required + when TLS encryption in enabled and TLS format is "PEM" +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - The number of secrets containing TLS certs should be equal to the number of replicas */}} +{{- define "kafka.validateValues.tlsSecrets.length" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- if and (include "kafka.tlsEncryption" .) (not (empty .Values.auth.tls.existingSecrets)) }} +{{- $existingSecretsLength := len .Values.auth.tls.existingSecrets }} +{{- if ne $replicaCount $existingSecretsLength }} +kafka: .Values.auth.tls.existingSecrets + Number of replicas and existingSecrets array length must be the same. Currently: replicaCount = {{ $replicaCount }} and existingSecrets = {{ $existingSecretsLength }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}} +{{- define "kafka.validateValues.tlsPasswords" -}} +{{- if and (include "kafka.client.tlsEncryption" .) (not .Values.auth.tls.passwordsSecret) }} +{{- if or .Values.auth.tls.keyPasswordSecretKey .Values.auth.tls.keystorePasswordSecretKey .Values.auth.tls.truststorePasswordSecretKey }} +kafka: auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey + auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey + must not be used without passwordsSecret setted. +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/templates/configmap.yaml b/deployment/helm/milvus/charts/kafka/templates/configmap.yaml new file mode 100644 index 000000000..509fd1c4f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "kafka.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + server.properties: |- + {{ .Values.config | nindent 4 }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/templates/extra-list.yaml b/deployment/helm/milvus/charts/kafka/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/jaas-secret.yaml b/deployment/helm/milvus/charts/kafka/templates/jaas-secret.yaml new file mode 100644 index 000000000..a42723e50 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/jaas-secret.yaml @@ -0,0 +1,38 @@ +{{- if (include "kafka.createJaasSecret" .) 
}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-jaas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + {{- $clientPasswords := .Values.auth.sasl.jaas.clientPasswords }} + {{- if $clientPasswords }} + client-passwords: {{ join "," $clientPasswords | b64enc | quote }} + {{- else }} + {{- $passwords := list }} + {{- range $clientUsers }} + {{- $passwords = append $passwords (randAlphaNum 10) }} + {{- end }} + client-passwords: {{ join "," $passwords | b64enc | quote }} + {{- end }} + {{- end }} + {{- $zookeeperUser := .Values.auth.sasl.jaas.zookeeperUser }} + {{- if and .Values.zookeeper.auth.enabled $zookeeperUser }} + {{- $zookeeperPassword := .Values.auth.sasl.jaas.zookeeperPassword }} + zookeeper-password: {{ default (randAlphaNum 10) $zookeeperPassword | b64enc | quote }} + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + {{- $interBrokerPassword := .Values.auth.sasl.jaas.interBrokerPassword }} + inter-broker-password: {{ default (randAlphaNum 10) $interBrokerPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/jmx-configmap.yaml b/deployment/helm/milvus/charts/kafka/templates/jmx-configmap.yaml new file mode 100644 index 000000000..465058070 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/jmx-configmap.yaml @@ -0,0 +1,60 @@ +{{- if (include "kafka.metrics.jmx.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + jmx-kafka-prometheus.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.config "context" $ ) | nindent 4 }} + rules: + - pattern: kafka.controller<type=(ControllerChannelManager), name=(QueueSize), broker-id=(\d+)><>(Value) + name: kafka_controller_$1_$2_$4 + labels: + broker_id: "$3" + - pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.server<type=(ReplicaFetcherManager), name=(.+), clientId=(.+)><>(Value) + name: kafka_server_$1_$2_$4 + labels: + client_id: "$3" + - pattern : kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value) + name: kafka_network_$1_$2_$4 + labels: + network_processor: $3 + - pattern : kafka.network<type=(RequestMetrics), name=(RequestsPerSec), request=(.+)><>(Count) + name: kafka_network_$1_$2_$4 + labels: + request: $3 + - pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate) + name: kafka_server_$1_$2_$4 + labels: + topic: $3 + - pattern: kafka.server<type=(DelayedOperationPurgatory), name=(.+), delayedOperation=(.+)><>(Value) + name: kafka_server_$1_$2_$3_$4 + - pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate) + name: kafka_server_$1_total_$2_$3 + - pattern: kafka.server<type=(.+)><>(queue-size) + name: kafka_server_$1_$2 + - pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+) + name: java_lang_$1_$4_$3_$2 + - pattern: java.lang<type=(.+), name=(.+)><>(\w+) + name: java_lang_$1_$3_$2 + - pattern : java.lang<type=(.*)> + - pattern: kafka.log<type=(Log), name=(.+), topic=(.+), partition=(.+)><>Value + name: kafka_log_$1_$2 + labels: + topic: $3 + partition: $4 +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/templates/jmx-metrics-svc.yaml b/deployment/helm/milvus/charts/kafka/templates/jmx-metrics-svc.yaml new file mode 100644 index 000000000..35c79f41f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/jmx-metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" .
| nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-deployment.yaml b/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100644 index 000000000..783f384c7 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,168 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $servicePort := int .Values.service.ports.client -}} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "kafka.metrics.kafka.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.kafka.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.metrics.kafka.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.kafka.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.kafka.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.priorityClassName }} + priorityClassName: {{ .Values.metrics.kafka.priorityClassName }} + {{- end }} + {{- if .Values.metrics.kafka.schedulerName }} + schedulerName: {{ .Values.metrics.kafka.schedulerName }} + {{- end }} + {{- if .Values.metrics.kafka.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + {{- if .Values.metrics.kafka.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . }} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + {{- if .Values.metrics.kafka.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - kafka_exporter + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- range $i, $e := until $replicaCount }} + - --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + - --sasl.enabled + - --sasl.username="$SASL_USERNAME" + - --sasl.password="${SASL_USER_PASSWORD%%,*}" + - --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + - --tls.enabled + {{- if .Values.metrics.kafka.certificatesSecret }} + - --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} + - --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} + {{- if .Values.metrics.kafka.tlsCaSecret }} + - --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} + {{- else }} + - --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} + {{- end }} + - --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) 
}} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + env: + - name: SASL_USERNAME + value: {{ index $clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.metrics.kafka.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + mountPath: /opt/bitnami/kafka-exporter/cacert/ + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.metrics.kafka.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + secret: + secretName: {{ .Values.metrics.kafka.tlsCaSecret }} + defaultMode: 0440 + {{- end }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-serviceaccount.yaml b/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-serviceaccount.yaml new file mode 100644 index 000000000..8dba0aae9 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.kafka.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.metrics.kafka.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-svc.yaml b/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100644 index 000000000..4f0da59e2 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.kafka.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }} + {{- if .Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/kafka-provisioning-secret.yaml b/deployment/helm/milvus/charts/kafka/templates/kafka-provisioning-secret.yaml new file mode 100644 index 000000000..0b29f5d0e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/kafka-provisioning-secret.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.provisioning.enabled (include "kafka.client.tlsEncryption" .) (not .Values.provisioning.auth.tls.passwordsSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kafka.client.passwordsSecretName" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }} + keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }} + key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/kafka-provisioning.yaml b/deployment/helm/milvus/charts/kafka/templates/kafka-provisioning.yaml new file mode 100644 index 000000000..ec23ede6f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/kafka-provisioning.yaml @@ -0,0 +1,189 @@ +{{- if .Values.provisioning.enabled }} +{{- $replicaCount := int .Values.replicaCount }} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" .
| nindent 4 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.provisioning.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.provisioning.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName | quote }} + {{- end }} + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + initContainers: + - name: wait-for-available-kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + wait-for-port \ + --host={{ include "common.names.fullname" . }} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.client | int64 }}; + echo "Kafka is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: kafka-provisioning + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + . /opt/bitnami/scripts/libkafka.sh + export CLIENT_CONF="${CLIENT_CONF:-/opt/bitnami/kafka/config/client.properties}" + if [ ! 
-f "$CLIENT_CONF" ]; then + touch $CLIENT_CONF + + kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + ! is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD" + {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }} + file_to_multiline_property() { + awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}" + } + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")" + {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD" + ! is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" + {{- end }} + {{- end }} + fi + {{- $bootstrapServer := printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }} + {{- range $topic := .Values.provisioning.topics }} + echo "Ensure topic '{{ $topic.name }}' exists" + /opt/bitnami/kafka/bin/kafka-topics.sh \ + --create \ + --if-not-exists \ + --bootstrap-server {{ $bootstrapServer }} \ + --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \ + --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \ + {{- range $name, $value := $topic.config }} + --config {{ $name }}={{ $value }} \ + {{- end }} + --command-config $CLIENT_CONF \ + --topic {{ $topic.name }} + {{- end }} + echo "Provisioning succeeded" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + - name: KAFKA_CLIENT_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }} + - name: KAFKA_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }} + - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . 
}} + key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + secret: + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/log4j-configmap.yaml b/deployment/helm/milvus/charts/kafka/templates/log4j-configmap.yaml new file mode 100644 index 000000000..8f7bc6c14 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.log4j "context" $ ) | nindent 4 }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/templates/networkpolicy-egress.yaml b/deployment/helm/milvus/charts/kafka/templates/networkpolicy-egress.yaml new file mode 100644 index 000000000..068024a0e --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/networkpolicy-egress.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/networkpolicy-ingress.yaml b/deployment/helm/milvus/charts/kafka/templates/networkpolicy-ingress.yaml new file mode 100644 index 000000000..258dcabb6 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/networkpolicy-ingress.yaml @@ -0,0 +1,53 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ printf "%s-ingress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow client connections + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }} + {{- end }} + {{- end }} + # Allow inter-broker communication + - ports: + - port: {{ .Values.containerPorts.internal }} + from: + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + # Allow external connections + {{- if .Values.externalAccess.enabled }} + - ports: + - port: {{ .Values.containerPorts.external }} + {{- if .Values.networkPolicy.externalAccess.from }} + from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.enabled }} + # Allow Prometheus scrapes + - ports: + - port: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/poddisruptionbudget.yaml b/deployment/helm/milvus/charts/kafka/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..e0a60151d --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/poddisruptionbudget.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" .
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/role.yaml b/deployment/helm/milvus/charts/kafka/templates/role.yaml new file mode 100644 index 000000000..63215b3b8 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/role.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/deployment/helm/milvus/charts/kafka/templates/rolebinding.yaml b/deployment/helm/milvus/charts/kafka/templates/rolebinding.yaml new file mode 100644 index 000000000..5b6ae26f7 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/scripts-configmap.yaml b/deployment/helm/milvus/charts/kafka/templates/scripts-configmap.yaml new file mode 100644 index 000000000..61df920c5 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,182 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "common.names.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $interBrokerPort := .Values.service.ports.internal }} + {{- $clientPort := .Values.service.ports.client }} + {{- $jksTruststoreSecret := .Values.auth.tls.jksTruststoreSecret -}} + {{- $jksTruststore := .Values.auth.tls.jksTruststore -}} + {{- $jksKeystoreSAN := .Values.auth.tls.jksKeystoreSAN -}} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + if [[ -f "/bitnami/kafka/data/meta.properties" ]]; then + export KAFKA_CFG_BROKER_ID="$(grep "broker.id" /bitnami/kafka/data/meta.properties | awk -F '=' '{print $2}')" + else + export KAFKA_CFG_BROKER_ID="$((ID + {{ .Values.minBrokerId }}))" + fi + + {{- if .Values.externalAccess.enabled }} + # Configure external ip and port + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_HOST=$(echo '{{ .Values.externalAccess.service.loadBalancerNames | default .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.ports.external }} + {{- else if eq 
.Values.externalAccess.service.type "NodePort" }} + {{- if and .Values.externalAccess.service.usePodIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${MY_POD_IP}" + {{- else if or .Values.externalAccess.service.useHostIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${HOST_IP}" + {{- else if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} + {{- else }} + export EXTERNAL_ACCESS_HOST=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ join "," .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_HOST}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) }} + mkdir -p /opt/bitnami/kafka/config/certs + {{- if eq .Values.auth.tls.type "jks" }} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs-${ID}" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }} + JKS_KEYSTORE={{ printf "/certs-${ID}/%s" (default "kafka.keystore.jks" $jksKeystoreSAN) | quote }} + {{- else }} + JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }} + JKS_KEYSTORE={{ printf "/certs/%s" (default "kafka-${ID}.keystore.jks" $jksKeystoreSAN) | quote }} + {{- end }} + if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then + cp "$JKS_TRUSTSTORE" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "$JKS_KEYSTORE" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." 
+ exit 1 + fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + + {{- else if eq .Values.auth.tls.type "pem" }} + + {{- if or (not (empty .Values.auth.tls.existingSecrets)) .Values.auth.tls.autoGenerated }} + PEM_CA="/certs-${ID}/ca.crt" + PEM_CERT="/certs-${ID}/tls.crt" + PEM_KEY="/certs-${ID}/tls.key" + {{- else }} + PEM_CA="/certs/kafka.truststore.pem" + PEM_CERT="/certs/kafka-${ID}.keystore.pem" + PEM_KEY="/certs/kafka-${ID}.keystore.key" + {{- end }} + if [[ -f "$PEM_CERT" ]] && [[ -f "$PEM_KEY" ]]; then + CERT_DIR="/opt/bitnami/kafka/config/certs" + PEM_CA_LOCATION="${CERT_DIR}/kafka.truststore.pem" + PEM_CERT_LOCATION="${CERT_DIR}/kafka.keystore.pem" + {{- if .Values.auth.tls.pemChainIncluded }} + cat $PEM_CERT | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f ${CERT_DIR}/xx + FIND_CA_RESULT=$(find ${CERT_DIR} -not -name 'xx00' -name 'xx*') + if [[ $(echo $FIND_CA_RESULT | wc -l) < 1 ]]; then + echo "auth.tls.pemChainIncluded was set, but PEM chain only contained 1 cert" + exit 1 + fi + echo $FIND_CA_RESULT | sort | xargs cat >> "$PEM_CA_LOCATION" + cat ${CERT_DIR}/xx00 > "$PEM_CERT_LOCATION" + {{- else }} + if [[ -f "$PEM_CA" ]]; then + cp "$PEM_CA" "$PEM_CA_LOCATION" + cp "$PEM_CERT" "$PEM_CERT_LOCATION" + else + echo "PEM_CA not provided, and auth.tls.pemChainIncluded was not true. One of these values must be set when using PEM type for TLS." + exit 1 + fi + {{- end }} + + # Ensure the key used PEM format with PKCS#8 + openssl pkcs8 -topk8 -nocrypt -in "$PEM_KEY" > "/opt/bitnami/kafka/config/certs/kafka.keystore.key" + else + echo "Couldn't find the expected PEM files! They are mandatory when encryption via TLS is enabled." + exit 1 + fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.pem" + {{- end }} + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/deployment/helm/milvus/charts/kafka/templates/serviceaccount.yaml b/deployment/helm/milvus/charts/kafka/templates/serviceaccount.yaml new file mode 100644 index 000000000..73091f5d7 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/deployment/helm/milvus/charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100644 index 000000000..826baf9ee --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: kafka + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/servicemonitor-metrics.yaml b/deployment/helm/milvus/charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100644 index 000000000..1e428c618 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/statefulset.yaml b/deployment/helm/milvus/charts/kafka/templates/statefulset.yaml new file mode 100644 index 000000000..4fc7276be --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/statefulset.yaml @@ -0,0 +1,579 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "common.names.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.ports.internal }} +{{- $clientPort := .Values.service.ports.client }} +{{- $interBrokerProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.interBrokerProtocol) -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "kafka.createConfigmap" .) 
}} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/jaas-secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createTlsSecret" .) }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostIPC: {{ .Values.hostIPC }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka.serviceAccountName" . }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.initContainers }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + find "{{ .Values.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ printf "%s%s" (include "kafka.zookeeper.fullname" .) (tpl .Values.zookeeperChrootPath .) | quote }} + {{- else }} + value: {{ include "common.tplvalues.render" (dict "value" (join "," .Values.externalZookeeper.servers) "context" $) }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $externalClientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($externalClientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ upper .Values.auth.sasl.mechanisms | quote }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.sasl.interBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ join "," .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- if eq .Values.externalAccess.service.type "NodePort" }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- end }} + {{- else }} + - name: KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ join "," .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + value: {{ ternary "yes" "no" 
.Values.allowPlaintextListener | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.sasl.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.sasl.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: "SASL" + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.sasl.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: KAFKA_TLS_TYPE + value: {{ upper .Values.auth.tls.type | quote }} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ default "" .Values.auth.tls.endpointIdentificationAlgorithm | quote }} + - name: KAFKA_TLS_CLIENT_AUTH + value: {{ ternary "required" "none" (or (eq (include "kafka.externalClientProtocol" . ) "mtls") (eq .Values.auth.clientProtocol "mtls")) | quote }} + - name: KAFKA_CERTIFICATE_PASSWORD + {{- if .Values.auth.tls.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.auth.tls.existingSecret }} + key: password + {{- else }} + value: {{ default "" .Values.auth.tls.password | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: "5555" + {{- end }} + - name: KAFKA_VOLUME_DIR + value: {{ .Values.persistence.mountPath | quote }} + - name: KAFKA_LOG_DIR + value: {{ .Values.logPersistence.mountPath | quote }} + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: {{ .Values.deleteTopicEnable | quote }} + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: {{ .Values.autoCreateTopicsEnable | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ .Values.heapOpts | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: {{ .Values.logFlushIntervalMessages | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: {{ .Values.logFlushIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: {{ .Values.logRetentionCheckIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: {{ .Values.logRetentionHours | quote }} + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: {{ .Values.maxMessageBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: {{ .Values.logSegmentBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_DIRS + value: {{ .Values.logsDirs | quote }} + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: {{ .Values.defaultReplicationFactor | quote }} + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: {{ .Values.offsetsTopicReplicationFactor | quote }} + - name: 
KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: {{ .Values.transactionStateLogReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: {{ .Values.transactionStateLogMinIsr | quote }} + - name: KAFKA_CFG_NUM_IO_THREADS + value: {{ .Values.numIoThreads | quote }} + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: {{ .Values.numNetworkThreads | quote }} + - name: KAFKA_CFG_NUM_PARTITIONS + value: {{ .Values.numPartitions | quote }} + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: {{ .Values.numRecoveryThreadsPerDataDir | quote }} + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: {{ .Values.socketReceiveBufferBytes | quote }} + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: {{ .Values.socketSendBufferBytes | quote }} + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + - name: KAFKA_CFG_AUTHORIZER_CLASS_NAME + value: {{ .Values.authorizerClassName | quote }} + - name: KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND + value: {{ .Values.allowEveryoneIfNoAclFound | quote }} + - name: KAFKA_CFG_SUPER_USERS + value: {{ .Values.superUsers | quote }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: kafka-client + containerPort: {{ .Values.containerPorts.client }} + - name: kafka-internal + containerPort: {{ .Values.containerPorts.internal }} + {{- if .Values.externalAccess.enabled }} + - name: kafka-external + containerPort: {{ .Values.containerPorts.external }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- else if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | 
nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + mountPath: {{ .Values.persistence.mountPath }}/config/server.properties + subPath: server.properties + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + mountPath: /shared + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $_ := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + mountPath: /truststore + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . }} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - java + args: + - -XX:+UnlockExperimentalVMOptions + - -XX:+UseCGroupMemoryLimitForHeap + - -XX:MaxRAMFraction=1 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + configMap: + name: {{ include "kafka.configmapName" . }} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + - name: scripts + configMap: + name: {{ include "common.names.fullname" . 
}}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + emptyDir: {} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $secret := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ tpl $secret $ }} + defaultMode: 256 + {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $index }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + secret: + secretName: {{ .Values.auth.tls.jksTruststoreSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} +{{- end }} +{{- if not .Values.logPersistence.enabled }} + - name: logs + emptyDir: {} +{{- else if .Values.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.logPersistence.existingClaim .) }} +{{- end }} + {{- if or (and .Values.persistence.enabled (not .Values.persistence.existingClaim)) (and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim)) }} + volumeClaimTemplates: + {{- end }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} +{{- if and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim) }} + - metadata: + name: logs + {{- if .Values.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.logPersistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.logPersistence.size | quote }} + {{ include "kafka.storageClass" . 
| nindent 8 }} + {{- if .Values.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/templates/svc-external-access.yaml b/deployment/helm/milvus/charts/kafka/templates/svc-external-access.yaml new file mode 100644 index 000000000..a7f93bf66 --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,60 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "common.names.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + namespace: {{ $root.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations $root.Values.externalAccess.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $root.Values.externalAccess.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $root.Values.externalAccess.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.externalAccess.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerIPs)) (eq (len $root.Values.externalAccess.service.loadBalancerIPs) $replicaCount) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.ports.external }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + {{- if $root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git 
a/deployment/helm/milvus/charts/kafka/templates/svc-headless.yaml b/deployment/helm/milvus/charts/kafka/templates/svc-headless.yaml new file mode 100644 index 000000000..72f99604f --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.ports.internal }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/deployment/helm/milvus/charts/kafka/templates/svc.yaml b/deployment/helm/milvus/charts/kafka/templates/svc.yaml new file mode 100644 index 000000000..defb1a79a --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/svc.yaml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{ end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if and 
.Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + - name: tcp-external + port: {{ .Values.service.ports.external }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/deployment/helm/milvus/charts/kafka/templates/tls-secrets.yaml b/deployment/helm/milvus/charts/kafka/templates/tls-secrets.yaml new file mode 100644 index 000000000..420b8dfcc --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/templates/tls-secrets.yaml @@ -0,0 +1,29 @@ +{{- if (include "kafka.createTlsSecret" .) }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $ca := genCA "kafka-ca" 365 }} +{{- range $i := until $replicaCount }} +{{- $replicaHost := printf "%s-%d.%s-headless" $fullname $i $fullname }} +{{- $altNames := list (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $replicaHost $releaseNamespace) (printf "%s.%s" $fullname $releaseNamespace) $replicaHost $fullname }} +{{- $cert := genSignedCert $replicaHost nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $i }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} +--- +{{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/kafka/values.yaml b/deployment/helm/milvus/charts/kafka/values.yaml new file mode 100644 index 000000000..9e6137c8b --- /dev/null +++ b/deployment/helm/milvus/charts/kafka/values.yaml @@ -0,0 +1,1581 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section Kafka parameters + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## @param image.registry Kafka image registry +## @param image.repository Kafka image repository +## @param image.tag Kafka image tag (immutable tags are recommended) +## @param image.pullPolicy Kafka image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 3.1.0-debian-10-r52 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## @param config Configuration file for Kafka. 
Auto-generated based on other parameters when not specified +## Specify content for server.properties +## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart) +## The server.properties is auto-generated based on other parameters when this parameter is not specified +## e.g: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +config: "" +## @param existingConfigmap ConfigMap with Kafka Configuration +## NOTE: This will override `config` AND any KAFKA_CFG_ environment variables +## +existingConfigmap: "" +## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers +## An optional log4j.properties file to overwrite the default of the Kafka brokers +## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +log4j: "" +## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file +## The name of an existing ConfigMap containing a log4j.properties file +## NOTE: this will override `log4j` +## +existingLog4jConfigMap: "" +## @param heapOpts Kafka Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m +## @param deleteTopicEnable Switch to enable topic deletion or not +## +deleteTopicEnable: false +## @param autoCreateTopicsEnable Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments +## +autoCreateTopicsEnable: true +## @param logFlushIntervalMessages The number of messages to accept before forcing a flush of data to disk +## +logFlushIntervalMessages: _10000 +## @param logFlushIntervalMs The maximum amount of time a message can sit in a log before we force a flush +## +logFlushIntervalMs: 1000 +## @param logRetentionBytes A size-based retention policy for logs +## +logRetentionBytes: _1073741824 +## @param logRetentionCheckIntervalMs The interval at which log segments are checked to see if they can be deleted +## +logRetentionCheckIntervalMs: 300000 +## @param logRetentionHours The minimum age of a log file to be eligible for deletion due to age +## +logRetentionHours: 168 +## @param logSegmentBytes The maximum size of a log segment file. 
When this size is reached a new log segment will be created +## +logSegmentBytes: _1073741824 +## @param logsDirs A comma separated list of directories under which to store log files +## +logsDirs: /bitnami/kafka/data +## @param maxMessageBytes The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 +## @param defaultReplicationFactor Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 +## @param offsetsTopicReplicationFactor The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 +## @param transactionStateLogReplicationFactor The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 +## @param transactionStateLogMinIsr Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 +## @param numIoThreads The number of threads doing disk I/O +## +numIoThreads: 8 +## @param numNetworkThreads The number of threads handling network requests +## +numNetworkThreads: 3 +## @param numPartitions The default number of log partitions per topic +## +numPartitions: 1 +## @param numRecoveryThreadsPerDataDir The number of threads per data directory to be used for log recovery at startup and flushing at shutdown +## +numRecoveryThreadsPerDataDir: 1 +## @param socketReceiveBufferBytes The receive buffer (SO_RCVBUF) used by the socket server +## +socketReceiveBufferBytes: 102400 +## @param socketRequestMaxBytes The maximum size of a request that the socket server will accept (protection against OOM) +## +socketRequestMaxBytes: _104857600 +## @param socketSendBufferBytes The send buffer (SO_SNDBUF) used by the socket server +## +socketSendBufferBytes: 102400 +## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to ZooKeeper +## +zookeeperConnectionTimeoutMs: 6000 +## @param zookeeperChrootPath Path which puts data under some path in the global ZooKeeper namespace +## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect +## +zookeeperChrootPath: "" +## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties +## +authorizerClassName: "" +## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users +## +allowEveryoneIfNoAclFound: true +## @param superUsers You can add super users in server.properties +## +superUsers: User:admin +## Authentication parameters +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## @param auth.clientProtocol Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.externalClientProtocol Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.interBrokerProtocol Authentication protocol for inter-broker communications. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## + clientProtocol: plaintext + # Note: empty by default for backwards compatibility reasons, find more information at + # https://github.com/bitnami/charts/pull/8902/ + externalClientProtocol: "" + interBrokerProtocol: plaintext + ## SASL configuration + ## + sasl: + ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` + ## + mechanisms: plain,scram-sha-256,scram-sha-512 + ## @param auth.sasl.interBrokerMechanism SASL mechanism for inter broker communication. + ## + interBrokerMechanism: plain + ## JAAS configuration for SASL authentication. + ## + jaas: + ## @param auth.sasl.jaas.clientUsers Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + ## @param auth.sasl.jaas.clientPasswords Kafka client passwords. This is mandatory if more than one user is specified in clientUsers + ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + ## @param auth.sasl.jaas.interBrokerUser Kafka inter broker communication user for SASL authentication + ## + interBrokerUser: admin + ## @param auth.sasl.jaas.interBrokerPassword Kafka inter broker communication password for SASL authentication + ## + interBrokerPassword: "" + ## @param auth.sasl.jaas.zookeeperUser Kafka ZooKeeper user for SASL authentication + ## + zookeeperUser: "" + ## @param auth.sasl.jaas.zookeeperPassword Kafka ZooKeeper password for SASL authentication + ## + zookeeperPassword: "" + ## @param auth.sasl.jaas.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + existingSecret: "" + ## TLS configuration + ## + tls: + ## @param auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem` + ## + type: jks + ## @param auth.tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. + ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. + ## + pemChainIncluded: false + ## @param auth.tls.existingSecrets Array existing secrets containing the TLS certificates for the Kafka brokers + ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore. + ## Create these secrets following the steps below: + ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. 
+ ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks + ## kubectl create secret generic SECRET_NAME_1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks + ## ... + ## + ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key. + ## Create these secrets following the steps below: + ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA + ## 2) Rename your CA file to `kafka.ca.crt`. + ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker. + ## 3) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker. + ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-0.tls.crt --from-file=tls.key=./kafka-0.tls.key + ## kubectl create secret generic SECRET_NAME_1 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-1.tls.crt --from-file=tls.key=./kafka-1.tls.key + ## ... + ## + existingSecrets: [] + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` + ## Note: ignored when using 'jks' format or `auth.tls.existingSecrets` is not empty + ## + autoGenerated: false + ## @param auth.tls.password Password to access the JKS files or PEM key when they are password-protected. + ## Note: ignored when using 'existingSecret'. + ## + password: "" + ## @param auth.tls.existingSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) + ## + existingSecret: "" + ## @param auth.tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststoreSecret: "" + ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate + ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services: + ## - kafka-0.kafka-headless.kafka.svc.cluster.local + ## - kafka-1.kafka-headless.kafka.svc.cluster.local + ## - kafka-2.kafka-headless.kafka.svc.cluster.local + ## Note: ignored when using 'pem' format for certificates. + ## + jksKeystoreSAN: "" + ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststore: "" + ## @param auth.tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate + ## Disable server host name verification by setting it to an empty string. 
+ ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + endpointIdentificationAlgorithm: https +## @param listeners The address(es) the socket server listens on. Auto-calculated it's set to an empty array +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] +## @param advertisedListeners The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] +## @param listenerSecurityProtocolMap The protocol->listener mapping. Auto-calculated it's set to nil +## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +listenerSecurityProtocolMap: "" +## @param allowPlaintextListener Allow to use the PLAINTEXT listener +## +allowPlaintextListener: true +## @param interBrokerListenerName The listener that the brokers should communicate on +## +interBrokerListenerName: INTERNAL +## @param command Override Kafka container command +## +command: + - /scripts/setup.sh +## @param args Override Kafka container arguments +## +args: [] +## @param extraEnvVars Extra environment variables to add to Kafka pods +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## e.g: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" + +## @section Statefulset parameters + +## @param replicaCount Number of Kafka nodes +## +replicaCount: 1 +## @param minBrokerId Minimal broker.id value, nodes increment their `broker.id` respectively +## Brokers increment their ID starting at this minimal value. +## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively. 
+## +minBrokerId: 0 +## @param containerPorts.client Kafka client container port +## @param containerPorts.internal Kafka inter-broker container port +## @param containerPorts.external Kafka external container port +## +containerPorts: + client: 9092 + internal: 9093 + external: 9094 +## Configure extra options for Kafka containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Kafka containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Kafka containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 +## @param startupProbe.enabled Enable startupProbe on Kafka containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup +## +lifecycleHooks: {} +## Kafka resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the container +## @param resources.requests The requested resources for the container +## +resources: + limits: {} + requests: {} +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable security context for the pods +## @param podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Kafka containers' Security Context +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable Kafka containers' Security Context +## @param containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot +## e.g: +## containerSecurityContext: +## enabled: true +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## @param hostAliases Kafka pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param hostNetwork Specify if host network should be enabled for Kafka pods +## +hostNetwork: false +## @param hostIPC Specify if host IPC should be enabled for Kafka pods +## +hostIPC: false +## @param podLabels Extra labels for Kafka pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Extra annotations for Kafka pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: {} +## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate +## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution +## +terminationGracePeriodSeconds: "" +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by kafka pods +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Name of the k8s scheduler (other than default) +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type Kafka statefulset strategy type +## @param updateStrategy.rollingUpdate Kafka statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s) +## e.g: +## extraVolumes: +## - name: kafka-jaas +## secret: +## secretName: kafka-jaas +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s) +## extraVolumeMounts: +## - name: kafka-jaas +## mountPath: /bitnami/kafka/config/kafka_jaas.conf +## subPath: kafka_jaas.conf +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the Kafka pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional Add init containers to the Kafka pod(s) +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## Kafka Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the Kafka pod +## @param pdb.minAvailable Maximum number/percentage of unavailable Kafka replicas +## @param pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters + +## Service parameters +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client Kafka svc port for client connections + ## @param service.ports.internal Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections + ## + ports: + client: 9092 + internal: 9093 + external: 9094 + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + 
client: "" + external: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value) + ## + extraPorts: [] +## External Access to Kafka brokers configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs/ports by querying the K8s API + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry + ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository + ## @param externalAccess.autoDiscovery.image.tag Init container auto-discovery image tag (immutable tags are recommended) + ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy + ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.23.5-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container + ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container + ## + resources: + limits: {} + requests: {} + ## Parameters to configure K8s service(s) used to 
externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort or LoadBalancer + ## + type: LoadBalancer + ## @param externalAccess.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerNames: + ## - broker1.external.example.com + ## - broker2.external.example.com + ## + loadBalancerNames: [] + ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com. + ## + loadBalancerAnnotations: [] + ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort + ## + useHostIPs: false + ## @param externalAccess.service.usePodIPs using the MY_POD_IP address for external access. + ## + usePodIPs: false + ## @param externalAccess.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort + ## If not specified, the container will try to get the kubernetes node external IP + ## + domain: "" + ## @param externalAccess.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port Kafka is + ## listening on. When true, zookeeper accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the kafka. 
+ ## But sometimes, we want the kafka to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## e.g: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port + ## e.g: + ## - ipBlock: + ## cidr: 172.9.0.0/16 + ## except: + ## - 172.9.1.0/24 + ## + externalAccess: + from: [] + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: [] + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: true + ## @param persistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for Kafka data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.mountPath Mount path of the Kafka data volume + ## + mountPath: /bitnami/kafka +## Log Persistence parameters +## +logPersistence: + ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: false + ## @param logPersistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param logPersistence.storageClass PVC Storage Class for Kafka logs volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + storageClass: "" + ## @param logPersistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param logPersistence.size PVC Storage Request for Kafka logs volume + ## + size: 8Gi + ## @param logPersistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param logPersistence.mountPath Mount path of the Kafka logs volume + ## + mountPath: /opt/bitnami/kafka/logs + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r372 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters + +## ServiceAccount for Kafka +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods + ## + create: true + ## @param serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.serviceAccountName template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Role Based Access Control +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## @section Metrics parameters + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka exporter: exposes complimentary metrics to JMX exporter + ## + kafka: + ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics + ## + enabled: false + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## @param metrics.kafka.image.registry Kafka exporter image registry + ## @param metrics.kafka.image.repository Kafka exporter image repository + ## @param metrics.kafka.image.tag Kafka exporter image tag (immutable tags are recommended) + ## @param metrics.kafka.image.pullPolicy Kafka exporter image pull policy + ## @param metrics.kafka.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.4.2-debian-10-r182 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files + ## for Kafka exporter client authentication + ## + certificatesSecret: "" + ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) + ## + tlsCert: cert-file + ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) + ## + tlsKey: key-file + ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication + ## + tlsCaSecret: "" + ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) + ## + tlsCaCert: ca-file + ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter + ## e.g: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + ## @param metrics.kafka.command Override Kafka exporter container command + ## + command: [] + ## @param metrics.kafka.args Override Kafka exporter container arguments + ## + args: [] + ## @param metrics.kafka.containerPorts.metrics Kafka 
exporter metrics container port + ## + containerPorts: + metrics: 9308 + ## Kafka exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.kafka.resources.limits The resources limits for the container + ## @param metrics.kafka.resources.requests The requested resources for the container + ## + resources: + limits: {} + requests: {} + ## Kafka exporter pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods + ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context + ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser + ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node metrics.kafka.affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match Ignored if `metrics.kafka.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.kafka.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.kafka.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param metrics.kafka.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## Kafka exporter service configuration + ## + service: + ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port + ## + ports: + metrics: 9308 + ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## Kafka exporter pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods + ## + create: true + ## @param metrics.kafka.serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template + ## + name: "" + ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Prometheus JMX exporter: exposes the majority of Kafkas metrics + ## + jmx: + ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus + ## + enabled: false + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## @param metrics.jmx.image.registry JMX exporter image registry + ## @param metrics.jmx.image.repository JMX exporter image repository + ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) + ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy + ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.16.1-debian-10-r245 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Prometheus JMX exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context + ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port + ## + containerPorts: + metrics: 5556 + ## Prometheus JMX exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container + ## @param metrics.jmx.resources.requests The requested resources for the JMX exporter container + ## + resources: + limits: {} + requests: {} + ## Prometheus JMX exporter service configuration + ## + service: + ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port + ## + ports: + metrics: 5556 + ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## 
ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}" + prometheus.io/path: "/" + ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter + ## Only whitelisted values will be exposed via JMX exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + ## @param metrics.jmx.config [string] Configuration file for JMX exporter + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + existingConfigmap: "" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Kafka provisioning parameters + +## Kafka provisioning +## +provisioning: + ## @param provisioning.enabled Enable kafka provisioning Job + ## + enabled: false + ## Auth Configuration for kafka provisioning Job + ## + auth: + ## TLS configuration for kafka provisioning Job + ## + tls: + ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## Note: ignored if auth.tls.clientProtocol different from one of these values: "tls" "mtls" "sasl_tls". + ## + type: jks + ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job. + ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore. + ## When using 'pem' format for certificates, the secret should contain a public CA certificate, a public certificate and one private key. + ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
+ ## + truststorePassword: "" + ## @param provisioning.numPartitions Default number of partitions for topics when unspecified + ## + numPartitions: 1 + ## @param provisioning.replicationFactor Default replication factor for topics when unspecified + ## + replicationFactor: 1 + ## @param provisioning.topics Kafka provisioning topics + ## - name: topic-name + ## partitions: 1 + ## replicationFactor: 1 + ## ## https://kafka.apache.org/documentation/#topicconfigs + ## config: + ## max.message.bytes: 64000 + ## flush.messages: 1 + ## + topics: [] + ## @param provisioning.command Override provisioning container command + ## + command: [] + ## @param provisioning.args Override provisioning container arguments + ## + args: [] + ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods + ## + podAnnotations: {} + ## @param provisioning.podLabels Extra labels for Kafka provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## Kafka provisioning resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container + ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container + ## + resources: + limits: {} + requests: {} + ## Kafka provisioning pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable security context for the pods + ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka provisioning containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context + ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: 
Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param provisioning.initContainers Add additional Add init containers to the Kafka provisioning pod(s) + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + +## @section ZooKeeper chart parameters + +## ZooKeeper chart configuration +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart + ## + enabled: true + ## @param zookeeper.replicaCount Number of ZooKeeper nodes + ## + replicaCount: 1 + ## ZooKeeper authenticaiton + ## + auth: + ## @param zookeeper.auth.enabled Enable ZooKeeper auth + ## + enabled: false + ## @param zookeeper.auth.clientUser User that will use ZooKeeper clients to auth + ## + clientUser: "" + ## @param zookeeper.auth.clientPassword Password that will use ZooKeeper clients to auth + ## + clientPassword: "" + ## @param zookeeper.auth.serverUsers Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param zookeeper.auth.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## ZooKeeper Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s) + ## @param zookeeper.persistence.storageClass Persistent Volume storage class + ## @param zookeeper.persistence.accessModes Persistent Volume access modes + ## @param zookeeper.persistence.size Persistent Volume size + ## + persistence: + enabled: true + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + +## External Zookeeper Configuration +## All of these values are only used if `zookeeper.enabled=false` +## +externalZookeeper: + ## @param externalZookeeper.servers List of external zookeeper servers to use + ## + servers: [] diff --git a/deployment/helm/milvus/charts/minio/.helmignore b/deployment/helm/milvus/charts/minio/.helmignore new file mode 100644 index 000000000..a9fe72788 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/deployment/helm/milvus/charts/minio/Chart.yaml b/deployment/helm/milvus/charts/minio/Chart.yaml new file mode 100644 index 000000000..7b6fbd328 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: master +description: High Performance, Kubernetes Native Object Storage +home: https://min.io +icon: https://min.io/resources/img/logo/MINIO_wordmark.png +keywords: +- storage +- object-storage +- S3 +maintainers: +- email: contact@milvus.io + name: contact +name: minio +sources: +- https://github.com/minio/minio +version: 8.0.17 diff --git a/deployment/helm/milvus/charts/minio/README.md b/deployment/helm/milvus/charts/minio/README.md new file mode 100644 index 000000000..e058c825f --- /dev/null +++ b/deployment/helm/milvus/charts/minio/README.md @@ -0,0 +1,425 @@ +# NOTE: **A new operator based [Helm Chart](https://github.com/minio/operator) replaces this project. This repository will be archived after April 2021** + +```bash +helm repo remove minio +helm repo add minio https://operator.min.io/ +helm install --namespace minio-operator --create-namespace --generate-name minio/minio-operator +kubectl apply -f https://github.com/minio/operator/blob/master/examples/tenant.yaml +``` + +# MinIO (legacy docs) +===== + +[MinIO](https://min.io) is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. + +MinIO supports [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide). In distributed mode, you can pool multiple drives (even on different machines) into a single object storage server. + +For more detailed documentation please visit [here](https://docs.minio.io/) + +Introduction +------------ + +This chart bootstraps MinIO deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Prerequisites +------------- + +- Kubernetes 1.4+ with Beta APIs enabled for default standalone mode. +- Kubernetes 1.5+ with Beta APIs enabled to run MinIO in [distributed mode](#distributed-minio). +- PV provisioner support in the underlying infrastructure. + +Configure MinIO Helm repo +-------------------- +```bash +$ helm repo add minio https://helm.min.io/ +``` + +Installing the Chart +-------------------- + +Install this chart using: + +```bash +$ helm install --namespace minio --generate-name minio/minio +``` + +The command deploys MinIO on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Release name + +An instance of a chart running in a Kubernetes cluster is called a release. Each release is identified by a unique name within the cluster. Helm automatically assigns a unique release name after installing the chart. You can also set your preferred name by: + +```bash +$ helm install my-release minio/minio +``` + +### Access and Secret keys + +By default a pre-generated access and secret key will be used. To override the default keys, pass the access and secret keys as arguments to helm install. 
+ +```bash +$ helm install --set accessKey=myaccesskey,secretKey=mysecretkey --generate-name minio/minio +``` + +### Updating MinIO configuration via Helm + +[ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap) allows injecting containers with configuration data even while a Helm release is deployed. + +To update your MinIO server configuration while it is deployed in a release, you need to + +1. Check all the configurable values in the MinIO chart using `helm inspect values minio/minio`. +2. Override the `minio_server_config` settings in a YAML formatted file, and then pass that file like this `helm upgrade -f config.yaml minio/minio`. +3. Restart the MinIO server(s) for the changes to take effect. + +You can also check the history of upgrades to a release using `helm history my-release`. Replace `my-release` with the actual release name. + +### Installing certificates from third party CAs + +MinIO can connect to other servers, including MinIO nodes or other server types such as NATs and Redis. If these servers use certificates that were not registered with a known CA, add trust for these certificates to MinIO Server by bundling these certificates into a Kubernetes secret and providing it to Helm via the `trustedCertsSecret` value. If `.Values.tls.enabled` is `true` and you're installing certificates for third party CAs, remember to include Minio's own certificate with key `public.crt`, if it also needs to be trusted. + +For instance, given that TLS is enabled and you need to add trust for Minio's own CA and for the CA of a Keycloak server, a Kubernetes secret can be created from the certificate files using `kubectl`: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=public.crt --from-file=keycloak.crt +``` + +If TLS is not enabled, you would need only the third party CA: + +``` +kubectl -n minio create secret generic minio-trusted-certs --from-file=keycloak.crt +``` + +The name of the generated secret can then be passed to Helm using a values file or the `--set` parameter: + +``` +trustedCertsSecret: "minio-trusted-certs" + +or + +--set trustedCertsSecret=minio-trusted-certs +``` + +Uninstalling the Chart +---------------------- + +Assuming your release is named as `my-release`, delete it using the command: + +```bash +$ helm delete my-release +``` + +or + +```bash +$ helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +Upgrading the Chart +------------------- + +You can use Helm to update MinIO version in a live release. Assuming your release is named as `my-release`, get the values using the command: + +```bash +$ helm get values my-release > old_values.yaml +``` + +Then change the field `image.tag` in `old_values.yaml` file with MinIO image tag you want to use. Now update the chart using + +```bash +$ helm upgrade -f old_values.yaml my-release minio/minio +``` + +Default upgrade strategies are specified in the `values.yaml` file. Update these fields if you'd like to use a different strategy. + +Configuration +------------- + +The following table lists the configurable parameters of the MinIO chart and their default values. 
+ +| Parameter | Description | Default | +|:-------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------| +| `nameOverride` | Provide a name in place of `minio` | `""` | +| `fullnameOverride` | Provide a name to substitute for the full names of resources | `""` | +| `image.repository` | Image repository | `minio/minio` | +| `image.tag` | MinIO image tag. Possible values listed [here](https://hub.docker.com/r/minio/minio/tags/). | `RELEASE.2020-11-06T23-17-07Z` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `imagePullSecrets` | List of container registry secrets | `[]` | +| `mcImage.repository` | Client image repository | `minio/mc` | +| `mcImage.tag` | mc image tag. Possible values listed [here](https://hub.docker.com/r/minio/mc/tags/). | `RELEASE.2020-10-03T02-54-56Z` | +| `mcImage.pullPolicy` | mc Image pull policy | `IfNotPresent` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.labels ` | Ingress labels | `{}` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.hosts` | Ingress accepted hostnames | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `trustedCertsSecret` | Kubernetes secret with trusted certificates to be mounted on `{{ .Values.certsPath }}/CAs` | `""` | +| `mode` | MinIO server mode (`standalone` or `distributed`) | `standalone` | +| `extraArgs` | Additional command line arguments to pass to the MinIO server | `[]` | +| `replicas` | Number of nodes (applicable only for MinIO distributed mode). | `4` | +| `zones` | Number of zones (applicable only for MinIO distributed mode). | `1` | +| `drivesPerNode` | Number of drives per node (applicable only for MinIO distributed mode). | `1` | +| `existingSecret` | Name of existing secret with access and secret key. | `""` | +| `accessKey` | Default access key (5 to 20 characters) | random 20 chars | +| `secretKey` | Default secret key (8 to 40 characters) | random 40 chars | +| `certsPath` | Default certs path location | `/etc/minio/certs` | +| `configPathmc` | Default config file location for MinIO client - mc | `/etc/minio/mc` | +| `mountPath` | Default mount location for persistent drive | `/export` | +| `bucketRoot` | Directory from where minio should serve buckets. | Value of `.mountPath` | +| `clusterDomain` | domain name of kubernetes cluster where pod is running. 
| `cluster.local` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `9000` | +| `service.externalIPs` | service external IP addresses | `nil` | +| `service.annotations` | Service annotations | `{}` | +| `serviceAccount.create` | Toggle creation of new service account | `true` | +| `serviceAccount.name` | Name of service account to create and/or use | `""` | +| `persistence.enabled` | Use persistent volume to store data | `true` | +| `persistence.annotations` | PVC annotations | `{}` | +| `persistence.size` | Size of persistent volume claim | `500Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | +| `persistence.storageClass` | Storage class name of PVC | `nil` | +| `persistence.accessMode` | ReadWriteOnce or ReadOnly | `ReadWriteOnce` | +| `persistence.subPath` | Mount a sub directory of the persistent volume if set | `""` | +| `resources.requests.memory` | Memory resource requests | Memory: `4Gi` | +| `priorityClassName` | Pod priority settings | `""` | +| `securityContext.enabled` | Enable to run containers as non-root. NOTE: if `persistence.enabled=false` then securityContext will be automatically disabled | `true` | +| `securityContext.runAsUser` | User id of the user for the container | `1000` | +| `securityContext.runAsGroup` | Group id of the user for the container | `1000` | +| `securityContext.fsGroup` | Group id of the persistent volume mount for the container | `1000` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `additionalLabels` | Additional labels for Deployment in standalone mode or StatefulSet in distributed mode | `[]` | +| `additionalAnnotations` | Additional annotations for Deployment in standalone mode or StatefulSet in distributed mode | `[]` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod Labels | `{}` | +| `tls.enabled` | Enable TLS for MinIO server | `false` | +| `tls.certSecret` | Kubernetes Secret with `public.crt` and `private.key` files. | `""` | +| `defaultBucket.enabled` | If set to true, a bucket will be created after MinIO install | `false` | +| `defaultBucket.name` | Bucket name | `bucket` | +| `defaultBucket.policy` | Bucket policy | `none` | +| `defaultBucket.purge` | Purge the bucket if already exists | `false` | +| `defaultBucket.versioning` | Enable / Suspend versioning for bucket | `nil` | +| `buckets` | List of buckets to create after MinIO install | `[]` | +| `makeBucketJob.annotations` | Additional annotations for the Kubernetes Batch (make-bucket-job) | `""` | +| `makeBucketJob.podAnnotations` | Additional annotations for the pods of the Kubernetes Batch (make-bucket-job) | `""` | +| `makeBucketJob.securityContext.enabled` | Enable to run Kubernetes Batch (make-bucket-job) containers as non-root. 
| `false` | +| `makeBucketJob.securityContext.runAsUser` | User id of the user for the container | `1000` | +| `makeBucketJob.securityContext.runAsGroup` | Group id of the user for the container | `1000` | +| `makeBucketJob.securityContext.fsGroup` | Group id of the persistent volume mount for the container | `1000` | +| `makeBucketJob.resources.requests.memory` | Memory resource requests for 'make bucket' job | `128Mi` | +| `updatePrometheusJob.podAnnotations` | Additional annotations for the pods of the Kubernetes Batch (update-prometheus-secret) | `""` | +| `updatePrometheusJob.securityContext.enabled` | Enable to run Kubernetes Batch (update-prometheus-secret) containers as non-root. | `false` | +| `updatePrometheusJob.securityContext.runAsUser` | User id of the user for the container | `1000` | +| `updatePrometheusJob.securityContext.runAsGroup` | Group id of the user for the container | `1000` | +| `updatePrometheusJob.securityContext.fsGroup` | Group id of the persistent volume mount for the container | `1000` | +| `s3gateway.enabled` | Use MinIO as a [s3 gateway](https://github.com/minio/minio/blob/master/docs/gateway/s3.md) | `false` | +| `s3gateway.replicas` | Number of s3 gateway instances to run in parallel | `4` | +| `s3gateway.serviceEndpoint` | Endpoint to the S3 compatible service | `""` | +| `s3gateway.accessKey` | Access key of S3 compatible service | `""` | +| `s3gateway.secretKey` | Secret key of S3 compatible service | `""` | +| `azuregateway.enabled` | Use MinIO as an azure gateway | `false` | +| `azuregateway.replicas` | Number of azure gateway instances to run in parallel | `4` | +| `gcsgateway.enabled` | Use MinIO as a Google Cloud Storage gateway | `false` | +| `gcsgateway.gcsKeyJson` | credential json file of service account key | `""` | +| `gcsgateway.projectId` | Google cloud project id | `""` | +| `nasgateway.enabled` | Use MinIO as a NAS gateway | `false` +| `nasgateway.replicas` | Number of NAS gateway instances to be run in parallel on a PV | `4` | +| `environment` | Set MinIO server relevant environment variables in `values.yaml` file. MinIO containers will be passed these variables when they start. | `MINIO_STORAGE_CLASS_STANDARD: EC:4"` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.serviceMonitor.relabelConfigs` | Relabel configs that can be used on Endpoints | `{}` | +| `etcd.endpoints` | Endpoints of etcd | `[]` | +| `etcd.pathPrefix` | Prefix for all etcd keys | `""` | +| `etcd.corednsPathPrefix` | Prefix for all CoreDNS etcd keys | `""` | +| `etcd.clientCert` | Certificate used for SSL/TLS connections to etcd [(etcd Security)](https://etcd.io/docs/latest/op-guide/security/) | `""` | +| `etcd.clientCertKey` | Key for the certificate [(etcd Security)](https://etcd.io/docs/latest/op-guide/security/) | `""` | + +Some of the parameters above map to the env variables defined in the [MinIO DockerHub image](https://hub.docker.com/r/minio/minio/). 
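+
+For example, a minimal overrides file might look like the following sketch (the file name is illustrative and the values are drawn from the table above; adjust them for your cluster):
+
+```yaml
+# my-values.yaml -- illustrative overrides for the MinIO chart
+mode: standalone          # "standalone" (default) or "distributed"
+accessKey: "myaccesskey"  # 5 to 20 characters
+secretKey: "mysecretkey"  # 8 to 40 characters
+persistence:
+  enabled: true
+  size: 100Gi             # chart default is 500Gi
+service:
+  type: ClusterIP
+  port: 9000
+```
+
+This file can then be passed to `helm install` with `-f`, as shown below.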
+ +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release --set persistence.size=1Ti minio/minio +``` + +The above command deploys MinIO server with a 1Ti backing persistent volume. + +Alternately, you can provide a YAML file that specifies parameter values while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml minio/minio +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +Distributed MinIO +----------- + +This chart provisions a MinIO server in standalone mode, by default. To provision MinIO server in [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide), set the `mode` field to `distributed`, + +```bash +$ helm install --set mode=distributed minio/minio +``` + +This provisions MinIO server in distributed mode with 4 nodes. To change the number of nodes in your distributed MinIO server, set the `replicas` field, + +```bash +$ helm install --set mode=distributed,replicas=8 minio/minio +``` + +This provisions MinIO server in distributed mode with 8 nodes. Note that the `replicas` value should be a minimum value of 4, there is no limit on number of servers you can run. + +You can also expand an existing deployment by adding new zones, following command will create a total of 16 nodes with each zone running 8 nodes. + +```bash +$ helm install --set mode=distributed,replicas=8,zones=2 minio/minio +``` + +### StatefulSet [limitations](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/#limitations) applicable to distributed MinIO + +1. StatefulSets need persistent storage, so the `persistence.enabled` flag is ignored when `mode` is set to `distributed`. +2. When uninstalling a distributed MinIO release, you'll need to manually delete volumes associated with the StatefulSet. + +NAS Gateway +----------- + +### Prerequisites + +MinIO in NAS gateway mode can be used to create multiple MinIO instances backed by single PV in `ReadWriteMany` mode. Currently few [Kubernetes volume plugins](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) support `ReadWriteMany` mode. To deploy MinIO NAS gateway with Helm chart you'll need to have a Persistent Volume running with one of the supported volume plugins. [This document](https://kubernetes.io/docs/concepts/storage/volumes/#nfs) +outlines steps to create a NFS PV in Kubernetes cluster. + +### Provision NAS Gateway MinIO instances + +To provision MinIO servers in NAS gateway mode, set the `nasgateway.enabled` field to `true`, + +```bash +$ helm install --set nasgateway.enabled=true minio/minio +``` + +This provisions 4 MinIO NAS gateway instances backed by single storage. To change the number of instances in your MinIO deployment, set the `replicas` field, + +```bash +$ helm install --set nasgateway.enabled=true,nasgateway.replicas=8 minio/minio +``` + +This provisions MinIO NAS gateway with 8 instances. + +Persistence +----------- + +This chart provisions a PersistentVolumeClaim and mounts corresponding persistent volume to default location `/export`. You'll need physical storage available in the Kubernetes cluster for this to work. If you'd rather use `emptyDir`, disable PersistentVolumeClaim by: + +```bash +$ helm install --set persistence.enabled=false minio/minio +``` + +> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. 
When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."* + +Existing PersistentVolumeClaim +------------------------------ + +If a Persistent Volume Claim already exists, specify it during installation. + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```bash +$ helm install --set persistence.existingClaim=PVC_NAME minio/minio +``` + +NetworkPolicy +------------- + +To enable network policy for MinIO, +install [a networking plugin that implements the Kubernetes +NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), +and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting +the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + + kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" + +With NetworkPolicy enabled, traffic will be limited to just port 9000. + +For more precise policy, set `networkPolicy.allowExternal=true`. This will +only allow pods with the generated client label to connect to MinIO. +This label will be displayed in the output of a successful install. + +Existing secret +--------------- + +Instead of having this chart create the secret for you, you can supply a preexisting secret, much +like an existing PersistentVolumeClaim. + +First, create the secret: +```bash +$ kubectl create secret generic my-minio-secret --from-literal=accesskey=foobarbaz --from-literal=secretkey=foobarbazqux +``` + +Then install the chart, specifying that you want to use an existing secret: +```bash +$ helm install --set existingSecret=my-minio-secret minio/minio +``` + +The following fields are expected in the secret: + +| .data. in Secret | Corresponding variable | Description | +|:---------------------------|:------------------------|:----------------------------------------------------------------------------------| +| `accesskey` | `accessKey` | Access key ID. Mandatory. | +| `secretkey` | `secretKey` | Secret key. Mandatory. | +| `gcs_key.json` | `gcsgateway.gcsKeyJson` | GCS key if you are using the GCS gateway feature. Optional | +| `awsAccessKeyId` | `s3gateway.accessKey` | S3 access key if you are using the S3 gateway feature. Optional | +| `awsSecretAccessKey` | `s3gateway.secretKey` | S3 secret key if you are using the S3 gateway feature. Optional | +| `etcd_client_cert.pem` | `etcd.clientCert` | Certificate for SSL/TLS connections to etcd. Optional | +| `etcd_client_cert_key.pem` | `etcd.clientCertKey` | Corresponding key for certificate above. Mandatory when etcd certificate defined. | + +All corresponding variables will be ignored in values file. + +Configure TLS +------------- + +To enable TLS for MinIO containers, acquire TLS certificates from a CA or create self-signed certificates. While creating / acquiring certificates ensure the corresponding domain names are set as per the standard [DNS naming conventions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-identity) in a Kubernetes StatefulSet (for a distributed MinIO setup). 
Then create a secret using + +```bash +$ kubectl create secret generic tls-ssl-minio --from-file=path/to/private.key --from-file=path/to/public.crt +``` + +Then install the chart, specifying that you want to use the TLS secret: + +```bash +$ helm install --set tls.enabled=true,tls.certSecret=tls-ssl-minio minio/minio +``` + +Pass environment variables to MinIO containers +---------------------------------------------- + +To pass environment variables to MinIO containers when deploying via Helm chart, use the below command line format + +```bash +$ helm install --set environment.MINIO_BROWSER=on,environment.MINIO_DOMAIN=domain-name minio/minio +``` + +You can add as many environment variables as required, using the above format. Just add `environment.=` under `set` flag. + +Create buckets after install +--------------------------- + +Install the chart, specifying the buckets you want to create after install: + +```bash +$ helm install --set buckets[0].name=bucket1,buckets[0].policy=none,buckets[0].purge=false minio/minio +``` + +Description of the configuration parameters used above - + +- `buckets[].name` - name of the bucket to create, must be a string with length > 0 +- `buckets[].policy` - can be one of none|download|upload|public +- `buckets[].purge` - purge if bucket exists already + diff --git a/deployment/helm/milvus/charts/minio/templates/NOTES.txt b/deployment/helm/milvus/charts/minio/templates/NOTES.txt new file mode 100644 index 000000000..679f1ea8a --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/NOTES.txt @@ -0,0 +1,47 @@ +{{- if eq .Values.service.type "ClusterIP" "NodePort" }} +Minio can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +To access Minio from localhost, run the below commands: + + 1. export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + + 2. kubectl port-forward $POD_NAME 9000 --namespace {{ .Release.Namespace }} + +Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ + +You can now access Minio server on http://localhost:9000. Follow the below steps to connect to Minio server with mc client: + + 1. Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. Get the ACCESS_KEY=$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.accesskey}" | base64 --decode) and the SECRET_KEY=$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.secretkey}" | base64 --decode) + + 3. mc alias set {{ template "minio.fullname" . }}-local http://localhost:{{ .Values.service.port }} "$ACCESS_KEY" "$SECRET_KEY" --api s3v4 + + 4. mc ls {{ template "minio.fullname" . }}-local + +Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} +{{- if eq .Values.service.type "LoadBalancer" }} +Minio can be accessed via port {{ .Values.service.port }} on an external IP address. Get the service external IP address by: +kubectl get svc --namespace {{ .Release.Namespace }} -l app={{ template "minio.fullname" . }} + +Note that the public IP may take a couple of minutes to be available. + +You can now access Minio server on http://:9000. Follow the below steps to connect to Minio server with mc client: + + 1. 
Download the Minio mc client - https://docs.minio.io/docs/minio-client-quickstart-guide + + 2. Get the ACCESS_KEY=$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.accesskey}" | base64 --decode) and the SECRET_KEY=$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.secretkey}" | base64 --decode) + 3. mc alias set {{ template "minio.fullname" . }} http://:{{ .Values.service.port }} "$ACCESS_KEY" "$SECRET_KEY" --api s3v4 + + 4. mc ls {{ template "minio.fullname" . }} + +Alternately, you can use your browser or the Minio SDK to access the server - https://docs.minio.io/categories/17 +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "minio.fullname" . }}-client=true" +will be able to connect to this minio cluster. +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/_helper_create_bucket.txt b/deployment/helm/milvus/charts/minio/templates/_helper_create_bucket.txt new file mode 100644 index 000000000..ad2f546b7 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/_helper_create_bucket.txt @@ -0,0 +1,111 @@ +#!/bin/sh +set -e ; # Have script exit in the event of a failed command. + +{{- if .Values.configPathmc }} +MC_CONFIG_DIR="{{ .Values.configPathmc }}" +MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" +{{- else }} +MC="/usr/bin/mc --insecure" +{{- end }} + +# connectToMinio +# Use a check-sleep-check loop to wait for Minio service to be available +connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/accesskey) ; SECRET=$(cat /config/secretkey) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to Minio server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} config host add myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 +} + +# checkBucketExists ($bucket) +# Check if the bucket exists, by using the exit code of `mc ls` +checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? +} + +# createBucket ($bucket, $policy, $purge) +# Ensure bucket exists, purging if asked to +createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist + if ! checkBucketExists $BUCKET ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + + + # set versioning for bucket + if [ ! 
-z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET +} + +# Try connecting to Minio instance +{{- if .Values.tls.enabled }} +scheme=https +{{- else }} +scheme=http +{{- end }} +connectToMinio $scheme + +{{- if or .Values.defaultBucket.enabled }} +# Create the bucket +createBucket {{ .Values.defaultBucket.name }} {{ .Values.defaultBucket.policy }} {{ .Values.defaultBucket.purge }} {{ .Values.defaultBucket.versioning }} +{{ else if .Values.buckets }} +# Create the buckets +{{- range .Values.buckets }} +createBucket {{ .name }} {{ .policy }} {{ .purge }} {{ .versioning }} +{{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/_helpers.tpl b/deployment/helm/milvus/charts/minio/templates/_helpers.tpl new file mode 100644 index 000000000..8ea4bca8a --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/_helpers.tpl @@ -0,0 +1,193 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "minio.pdb.apiVersion" -}} +{{- if semverCompare "<1.21-0" .Capabilities.KubeVersion.Version -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "minio.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "minio.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "minio.statefulset.apiVersion" -}} +{{- if semverCompare "<1.16-0" .Capabilities.KubeVersion.Version -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "minio.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Determine secret name. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.existingSecret -}} +{{- .Values.existingSecret }} +{{- else -}} +{{- include "minio.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Determine service account name for deployment or statefulset. +*/}} +{{- define "minio.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{- default (include "minio.fullname" .) .Values.serviceAccount.name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- default "default" .Values.serviceAccount.name -}} +{{- end -}} +{{- end -}} + +{{/* +Determine name for scc role and rolebinding +*/}} +{{- define "minio.sccRoleName" -}} +{{- printf "%s-%s" "scc" (include "minio.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Properly format optional additional arguments to Minio binary +*/}} +{{- define "minio.extraArgs" -}} +{{- range .Values.extraArgs -}} +{{ " " }}{{ . }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: + {{ toYaml .Values.imagePullSecrets }} +{{- end -}} +{{- end -}} + +{{/* +Formats volumeMount for Minio tls keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolumeMount" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + mountPath: {{ .Values.certsPath }} +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $casPath := printf "%s/CAs" .Values.certsPath | clean }} +- name: trusted-cert-secret-volume + mountPath: {{ $casPath }} +{{- end }} +{{- end -}} + +{{/* +Formats volume for Minio tls keys and trusted certs +*/}} +{{- define "minio.tlsKeysVolume" -}} +{{- if .Values.tls.enabled }} +- name: cert-secret-volume + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: public.crt + - key: {{ .Values.tls.privateKey }} + path: private.key +{{- end }} +{{- if or .Values.tls.enabled (ne .Values.trustedCertsSecret "") }} +{{- $certSecret := eq .Values.trustedCertsSecret "" | ternary .Values.tls.certSecret .Values.trustedCertsSecret }} +{{- $publicCrt := eq .Values.trustedCertsSecret "" | ternary .Values.tls.publicCrt "" }} +- name: trusted-cert-secret-volume + secret: + secretName: {{ $certSecret }} + {{- if ne $publicCrt "" }} + items: + - key: {{ $publicCrt }} + path: public.crt + {{- end }} +{{- end }} +{{- end -}} diff --git a/deployment/helm/milvus/charts/minio/templates/clusterroles.yaml b/deployment/helm/milvus/charts/minio/templates/clusterroles.yaml new file mode 100644 index 000000000..c4d9a9371 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/clusterroles.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "minio.serviceAccountName" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + resourceNames: + - {{ template "minio.fullname" . }} + verbs: + - use +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/configmap.yaml b/deployment/helm/milvus/charts/minio/templates/configmap.yaml new file mode 100644 index 000000000..cb11fcd7d --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + initialize: |- +{{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . 
| indent 4 }} diff --git a/deployment/helm/milvus/charts/minio/templates/deployment.yaml b/deployment/helm/milvus/charts/minio/templates/deployment.yaml new file mode 100644 index 000000000..ffa880b4c --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/deployment.yaml @@ -0,0 +1,242 @@ +{{- if eq .Values.mode "standalone" }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +apiVersion: {{ template "minio.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + strategy: + type: {{ .Values.DeploymentUpdate.type }} + {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} + maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} + {{- end}} + {{- if .Values.nasgateway.enabled }} + replicas: {{ .Values.nasgateway.replicas }} + {{- end }} + {{- if .Values.s3gateway.enabled }} + replicas: {{ .Values.s3gateway.replicas }} + {{- end }} + {{- if .Values.azuregateway.enabled }} + replicas: {{ .Values.azuregateway.replicas }} + {{- end }} + {{- if .Values.gcsgateway.enabled }} + replicas: {{ .Values.gcsgateway.replicas }} + {{- end }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + serviceAccountName: {{ include "minio.serviceAccountName" . | quote }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.s3gateway.enabled }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio -S {{ .Values.certsPath }} gateway s3 {{ .Values.s3gateway.serviceEndpoint }} {{- template "minio.extraArgs" . }}" + {{- else }} + {{- if .Values.azuregateway.enabled }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio -S {{ .Values.certsPath }} gateway azure {{- template "minio.extraArgs" . 
}}" + {{- else }} + {{- if .Values.gcsgateway.enabled }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio -S {{ .Values.certsPath }} gateway gcs {{ .Values.gcsgateway.projectId }} {{- template "minio.extraArgs" . }}" + {{- else }} + {{- if .Values.nasgateway.enabled }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio -S {{ .Values.certsPath }} gateway nas {{ $bucketRoot }} {{- template "minio.extraArgs" . }}" + {{- else }} + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio -S {{ .Values.certsPath }} server {{ $bucketRoot }} {{- template "minio.extraArgs" . }}" + {{- end }} + {{- end }} + {{- end }} + {{- end }} + volumeMounts: + {{- if and .Values.persistence.enabled (not .Values.gcsgateway.enabled) (not .Values.azuregateway.enabled) (not .Values.s3gateway.enabled) }} + - name: export + mountPath: {{ .Values.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: "{{ .Values.persistence.subPath }}" + {{- end }} + {{- end }} + {{- if or .Values.gcsgateway.enabled .Values.etcd.clientCert .Values.etcd.clientCertKey }} + - name: minio-user + mountPath: "/etc/credentials" + readOnly: true + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: 9000 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /minio/health/live + port: {{ $scheme }} + scheme: {{ $scheme | upper }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: {{ $scheme }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: {{ $scheme }} + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: secretkey + {{- if and .Values.gcsgateway.enabled .Values.gcsgateway.gcsKeyJson }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/credentials/gcs_key.json" + {{- end }} + {{- if .Values.etcd.endpoints }} + - name: MINIO_ETCD_ENDPOINTS + value: {{ join "," .Values.etcd.endpoints | quote }} + {{- if .Values.etcd.clientCert }} + - name: MINIO_ETCD_CLIENT_CERT + value: "/etc/credentials/etcd_client_cert.pem" + {{- end }} + {{- if .Values.etcd.clientCertKey }} + - name: MINIO_ETCD_CLIENT_CERT_KEY + value: "/etc/credentials/etcd_client_cert_key.pem" + {{- end }} + {{- if .Values.etcd.pathPrefix }} + - name: MINIO_ETCD_PATH_PREFIX + value: {{ .Values.etcd.pathPrefix }} + {{- end }} + {{- if .Values.etcd.corednsPathPrefix }} + - name: MINIO_ETCD_COREDNS_PATH + value: {{ .Values.etcd.corednsPathPrefix }} + {{- end }} + {{- end }} + {{- if .Values.s3gateway.enabled -}} + {{- if or .Values.s3gateway.accessKey .Values.existingSecret }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: awsAccessKeyId + {{- end }} + {{- if or .Values.s3gateway.secretKey .Values.existingSecret }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: awsSecretAccessKey + {{- end }} + {{- end }} + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + {{- if and (not .Values.gcsgateway.enabled) (not .Values.azuregateway.enabled) (not .Values.s3gateway.enabled) }} + - name: export + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . | indent 8 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/ingress.yaml b/deployment/helm/milvus/charts/minio/templates/ingress.yaml new file mode 100644 index 000000000..2d9bbda05 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/ingress.yaml @@ -0,0 +1,45 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "minio.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: {{ template "minio.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- if . }} + host: {{ . | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/networkpolicy.yaml b/deployment/helm/milvus/charts/minio/templates/networkpolicy.yaml new file mode 100644 index 000000000..de57f485f --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/networkpolicy.yaml @@ -0,0 +1,25 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + ingress: + - ports: + - port: {{ .Values.service.port }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "minio.name" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/poddisruptionbudget.yaml b/deployment/helm/milvus/charts/minio/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..dce133579 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/poddisruptionbudget.yaml @@ -0,0 +1,13 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: {{ template "minio.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: minio + labels: + app: {{ template "minio.name" . }} +spec: + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + app: {{ template "minio.name" . }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/podmonitor.yaml b/deployment/helm/milvus/charts/minio/templates/podmonitor.yaml new file mode 100644 index 000000000..0bd969fb9 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/podmonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.metrics.podMonitor.enabled }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +spec: + podMetricsEndpoints: + - port: {{ $scheme }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.podMonitor.interval }} + interval: {{ .Values.metrics.podMonitor.interval }} + {{- end }} + {{- if .Values.metrics.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.podMonitor.scrapeTimeout }} + {{- end }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-podmonitor-prometheus + key: token + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . 
}} + release: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/deployment/helm/milvus/charts/minio/templates/post-install-create-bucket-job.yaml b/deployment/helm/milvus/charts/minio/templates/post-install-create-bucket-job.yaml new file mode 100644 index 000000000..27e30d4ab --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/post-install-create-bucket-job.yaml @@ -0,0 +1,87 @@ +{{- if or .Values.defaultBucket.enabled .Values.buckets }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "minio.fullname" . }}-make-bucket-job + labels: + app: {{ template "minio.name" . }}-make-bucket-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +{{- with .Values.makeBucketJob.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.podAnnotations }} + annotations: +{{ toYaml .Values.makeBucketJob.podAnnotations | indent 8 }} +{{- end }} + spec: + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.makeBucketJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.makeBucketJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: {{ template "minio.fullname" . }} + - secret: + name: {{ template "minio.secretName" . }} + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + secret: + secretName: {{ .Values.tls.certSecret }} + items: + - key: {{ .Values.tls.publicCrt }} + path: CAs/public.crt + {{ end }} + serviceAccountName: {{ include "minio.serviceAccountName" . | quote }} + containers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: {{ template "minio.fullname" . }} + - name: MINIO_PORT + value: {{ .Values.service.port | quote }} + volumeMounts: + - name: minio-configuration + mountPath: /config + {{- if .Values.tls.enabled }} + - name: cert-secret-volume-mc + mountPath: {{ .Values.configPathmc }}certs + {{ end }} + resources: +{{ toYaml .Values.makeBucketJob.resources | indent 10 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-job.yaml b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-job.yaml new file mode 100644 index 000000000..3091a9482 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-job.yaml @@ -0,0 +1,152 @@ +{{- if or .Values.metrics.serviceMonitor.enabled .Values.metrics.podMonitor.enabled }} +{{- $fullName := include "minio.fullname" . 
-}} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $fullName }}-update-prometheus-secret + labels: + app: {{ template "minio.name" . }}-update-prometheus-secret + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation + {{ toYaml .Values.updatePrometheusJob.annotations | indent 4 }} +spec: + template: + metadata: + labels: + app: {{ template "minio.name" . }}-update-prometheus-secret + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.updatePrometheusJob.podAnnotations }} + annotations: +{{ toYaml .Values.updatePrometheusJob.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- if .Values.serviceAccount.create }} + serviceAccountName: {{ $fullName }}-update-prometheus-secret +{{- end }} + restartPolicy: OnFailure +{{- include "minio.imagePullSecrets" . | indent 6 }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- if .Values.updatePrometheusJob.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.updatePrometheusJob.securityContext.runAsUser }} + runAsGroup: {{ .Values.updatePrometheusJob.securityContext.runAsGroup }} + fsGroup: {{ .Values.updatePrometheusJob.securityContext.fsGroup }} +{{- end }} + volumes: + - name: workdir + emptyDir: {} + initContainers: + - name: minio-mc + image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" + imagePullPolicy: {{ .Values.mcImage.pullPolicy }} + command: + - /bin/sh + - "-c" + - mc --config-dir {{ .Values.configPathmc }} admin prometheus generate target --json --no-color -q > /workdir/mc.json + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . 
}} + key: secretkey + # mc admin prometheus generate don't really connect to remote server, TLS cert isn't required + - name: MC_HOST_target + value: {{ $scheme }}://$(MINIO_ACCESS_KEY):$(MINIO_SECRET_KEY)@{{ $fullName }}:{{ .Values.service.port }} + volumeMounts: + - name: workdir + mountPath: /workdir + resources: +{{ toYaml .Values.resources | indent 12 }} + # extract bearerToken from mc admin output + - name: jq + image: "{{ .Values.helmKubectlJqImage.repository }}:{{ .Values.helmKubectlJqImage.tag }}" + imagePullPolicy: {{ .Values.helmKubectlJqImage.pullPolicy }} + command: + - /bin/sh + - "-c" + - jq -e -c -j -r .bearerToken < /workdir/mc.json > /workdir/token + volumeMounts: + - name: workdir + mountPath: /workdir + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: kubectl-create + image: "{{ .Values.helmKubectlJqImage.repository }}:{{ .Values.helmKubectlJqImage.tag }}" + imagePullPolicy: {{ .Values.helmKubectlJqImage.pullPolicy }} + command: ["/bin/sh", "-c"] + args: + # The following script does: + # - get the servicemonitor that need this secret and copy some metadata and create the ownerreference for the secret file + # - create the secret + # - merge both json + {{- if and .Values.metrics.serviceMonitor.enabled .Values.metrics.podMonitor.enabled }} + - | + mkdir -p /workdir/secrets && kubectl -n {{ .Release.Namespace }} get servicemonitor {{ $fullName }} -o json | + jq -c '{metadata: {name: "{{ $fullName }}-servicemonitor-prometheus", namespace: .metadata.namespace, labels: {app: .metadata.labels.app, release: .metadata.labels.release}, ownerReferences: [{apiVersion: .apiVersion, kind: .kind, blockOwnerDeletion: true, controller: true, uid: .metadata.uid, name: .metadata.name}]}}' > /workdir/servicemonitormetadata.json && + kubectl create secret generic {{ $fullName }}-servicemonitor-prometheus --from-file=token=/workdir/token --dry-run -o json > /workdir/servicemonitorsecret.json && + cat /workdir/servicemonitorsecret.json /workdir/servicemonitormetadata.json | jq -s add > /workdir/secrets/servicemonitorobject.json; + mkdir -p /workdir/secrets && kubectl -n {{ .Release.Namespace }} get podmonitor {{ $fullName }} -o json | + jq -c '{metadata: {name: "{{ $fullName }}-podmonitor-prometheus", namespace: .metadata.namespace, labels: {app: .metadata.labels.app, release: .metadata.labels.release}, ownerReferences: [{apiVersion: .apiVersion, kind: .kind, blockOwnerDeletion: true, controller: true, uid: .metadata.uid, name: .metadata.name}]}}' > /workdir/podmonitormetadata.json && + kubectl create secret generic {{ $fullName }}-podmonitor-prometheus --from-file=token=/workdir/token --dry-run -o json > /workdir/podmonitorsecret.json && + cat /workdir/podmonitorsecret.json /workdir/podmonitormetadata.json | jq -s add > /workdir/secrets/podmonitorobject.json + {{- else if .Values.metrics.podMonitor.enabled }} + - | + mkdir -p /workdir/secrets && kubectl -n {{ .Release.Namespace }} get podmonitor {{ $fullName }} -o json | + jq -c '{metadata: {name: "{{ $fullName }}-podmonitor-prometheus", namespace: .metadata.namespace, labels: {app: .metadata.labels.app, release: .metadata.labels.release}, ownerReferences: [{apiVersion: .apiVersion, kind: .kind, blockOwnerDeletion: true, controller: true, uid: .metadata.uid, name: .metadata.name}]}}' > /workdir/podmonitormetadata.json && + kubectl create secret generic {{ $fullName }}-podmonitor-prometheus --from-file=token=/workdir/token --dry-run -o json > /workdir/podmonitorsecret.json && + cat /workdir/podmonitorsecret.json 
/workdir/podmonitormetadata.json | jq -s add > /workdir/secrets/podmonitorobject.json + {{- else if .Values.metrics.serviceMonitor.enabled }} + - | + mkdir -p /workdir/secrets && kubectl -n {{ .Release.Namespace }} get servicemonitor {{ $fullName }} -o json | + jq -c '{metadata: {name: "{{ $fullName }}-servicemonitor-prometheus", namespace: .metadata.namespace, labels: {app: .metadata.labels.app, release: .metadata.labels.release}, ownerReferences: [{apiVersion: .apiVersion, kind: .kind, blockOwnerDeletion: true, controller: true, uid: .metadata.uid, name: .metadata.name}]}}' > /workdir/servicemonitormetadata.json && + kubectl create secret generic {{ $fullName }}-servicemonitor-prometheus --from-file=token=/workdir/token --dry-run -o json > /workdir/servicemonitorsecret.json && + cat /workdir/servicemonitorsecret.json /workdir/servicemonitormetadata.json | jq -s add > /workdir/secrets/servicemonitorobject.json + {{- end }} + volumeMounts: + - name: workdir + mountPath: /workdir + resources: +{{ toYaml .Values.resources | indent 12 }} + containers: + - name: kubectl-apply + image: "{{ .Values.helmKubectlJqImage.repository }}:{{ .Values.helmKubectlJqImage.tag }}" + imagePullPolicy: {{ .Values.helmKubectlJqImage.pullPolicy }} + command: + - kubectl + - apply + - "-f" + - /workdir/secrets + volumeMounts: + - name: workdir + mountPath: /workdir + resources: +{{ toYaml .Values.resources | indent 12 }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-role.yaml b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-role.yaml new file mode 100644 index 000000000..fe07bf804 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-role.yaml @@ -0,0 +1,48 @@ +{{- if or .Values.metrics.serviceMonitor.enabled .Values.metrics.podMonitor.enabled }} +{{- $fullName := include "minio.fullname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $fullName }}-update-prometheus-secret + labels: + app: {{ template "minio.name" . }}-update-prometheus-secret + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create + - update + - patch + resourceNames: + {{- if .Values.metrics.serviceMonitor.enabled }} + - {{ $fullName }}-servicemonitor-prometheus + {{- end }} + {{- if .Values.metrics.podMonitor.enabled }} + - {{ $fullName }}-podmonitor-prometheus + {{- end}} + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - apiGroups: + - monitoring.coreos.com + resources: + {{- if .Values.metrics.serviceMonitor.enabled }} + - servicemonitors + {{- end}} + {{- if .Values.metrics.podMonitor.enabled }} + - podmonitors + {{- end}} + verbs: + - get + resourceNames: + - {{ $fullName }} +{{- end -}} \ No newline at end of file diff --git a/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-rolebinding.yaml b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-rolebinding.yaml new file mode 100644 index 000000000..9daf50d51 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if or .Values.metrics.serviceMonitor.enabled .Values.metrics.podMonitor.enabled }} +{{- $fullName := include "minio.fullname" . 
-}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $fullName }}-update-prometheus-secret + labels: + app: {{ template "minio.name" . }}-update-prometheus-secret + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ $fullName }}-update-prometheus-secret +subjects: + - kind: ServiceAccount + name: {{ $fullName }}-update-prometheus-secret + namespace: {{ .Release.Namespace | quote }} +{{- end -}} \ No newline at end of file diff --git a/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-serviceaccount.yaml b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-serviceaccount.yaml new file mode 100644 index 000000000..a704b5334 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/post-install-prometheus-metrics-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.metrics.serviceMonitor.enabled .Values.metrics.podMonitor.enabled }} +{{- $fullName := include "minio.fullname" . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ $fullName }}-update-prometheus-secret + labels: + app: {{ template "minio.name" . }}-update-prometheus-secret + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} \ No newline at end of file diff --git a/deployment/helm/milvus/charts/minio/templates/pvc.yaml b/deployment/helm/milvus/charts/minio/templates/pvc.yaml new file mode 100644 index 000000000..99f0fd4d9 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/pvc.yaml @@ -0,0 +1,39 @@ +{{- if eq .Values.mode "standalone" }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "minio.fullname" . }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if and .Values.nasgateway.enabled .Values.nasgateway.pv }} + selector: + matchLabels: + pv: {{ .Values.nasgateway.pv | quote }} +{{- end }} + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.persistence.VolumeName }} + volumeName: "{{ .Values.persistence.VolumeName }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/rolebindings.yaml b/deployment/helm/milvus/charts/minio/templates/rolebindings.yaml new file mode 100644 index 000000000..ea8b98c58 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/rolebindings.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "minio.serviceAccountName" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + namespace: {{ .Release.Namespace | quote }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "minio.serviceAccountName" . }} +subjects: +- kind: ServiceAccount + name: {{ template "minio.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/secrets.yaml b/deployment/helm/milvus/charts/minio/templates/secrets.yaml new file mode 100644 index 000000000..c254142f9 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/secrets.yaml @@ -0,0 +1,32 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "minio.secretName" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + accesskey: {{ if .Values.accessKey }}{{ .Values.accessKey | toString | b64enc | quote }}{{ else }}{{ randAlphaNum 20 | b64enc | quote }}{{ end }} + secretkey: {{ if .Values.secretKey }}{{ .Values.secretKey | toString | b64enc | quote }}{{ else }}{{ randAlphaNum 40 | b64enc | quote }}{{ end }} +{{- if and .Values.gcsgateway.enabled .Values.gcsgateway.gcsKeyJson }} + gcs_key.json: {{ .Values.gcsgateway.gcsKeyJson | toString | b64enc }} +{{- end }} +{{- if .Values.s3gateway.enabled -}} +{{- if .Values.s3gateway.accessKey }} + awsAccessKeyId: {{ .Values.s3gateway.accessKey | toString | b64enc | quote }} +{{- end }} +{{- if .Values.s3gateway.secretKey }} + awsSecretAccessKey: {{ .Values.s3gateway.secretKey | toString | b64enc | quote }} +{{- end }} +{{- end }} +{{- if .Values.etcd.clientCert }} + etcd_client_cert.pem: {{ .Values.etcd.clientCert | toString | b64enc | quote }} +{{- end }} +{{- if .Values.etcd.clientCertKey }} + etcd_client_cert_key.pem: {{ .Values.etcd.clientCertKey | toString | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/securitycontextconstraints.yaml b/deployment/helm/milvus/charts/minio/templates/securitycontextconstraints.yaml new file mode 100644 index 000000000..4bac7e372 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/securitycontextconstraints.yaml @@ -0,0 +1,45 @@ +{{- if and .Values.securityContext.enabled .Values.persistence.enabled (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: [] +readOnlyRootFilesystem: false +defaultAddCapabilities: [] +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +fsGroup: + type: MustRunAs + ranges: + - max: {{ .Values.securityContext.fsGroup }} + min: {{ .Values.securityContext.fsGroup }} +runAsUser: + type: MustRunAs + uid: {{ .Values.securityContext.runAsUser }} +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/service.yaml b/deployment/helm/milvus/charts/minio/templates/service.yaml new file mode 100644 index 000000000..ea681e220 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/service.yaml @@ -0,0 +1,47 @@ +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} + type: ClusterIP + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP +{{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} +{{- else }} + targetPort: 9000 +{{- end}} +{{- if .Values.service.externalIPs }} + externalIPs: +{{- range $i , $ip := .Values.service.externalIPs }} + - {{ $ip }} +{{- end }} +{{- end }} + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} diff --git a/deployment/helm/milvus/charts/minio/templates/serviceaccount.yaml b/deployment/helm/milvus/charts/minio/templates/serviceaccount.yaml new file mode 100644 index 000000000..243dfef53 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "minio.serviceAccountName" . | quote }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: "{{ .Release.Name }}" +{{- end -}} diff --git a/deployment/helm/milvus/charts/minio/templates/servicemonitor.yaml b/deployment/helm/milvus/charts/minio/templates/servicemonitor.yaml new file mode 100644 index 000000000..1ad90a563 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "minio.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ $scheme }} + path: /minio/v2/metrics/cluster + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelConfigs }} +{{ toYaml .Values.metrics.serviceMonitor.relabelConfigs | indent 6 }} + {{- end }} + bearerTokenSecret: + name: {{ template "minio.fullname" . }}-servicemonitor-prometheus + key: token + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: + app: {{ include "minio.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/templates/statefulset.yaml b/deployment/helm/milvus/charts/minio/templates/statefulset.yaml new file mode 100644 index 000000000..ff16079cf --- /dev/null +++ b/deployment/helm/milvus/charts/minio/templates/statefulset.yaml @@ -0,0 +1,213 @@ +{{- if eq .Values.mode "distributed" }} +{{ $zoneCount := .Values.zones | int }} +{{ $nodeCount := .Values.replicas | int }} +{{ $drivesPerNode := .Values.drivesPerNode | int }} +{{ $scheme := "http" }} +{{- if .Values.tls.enabled }} +{{ $scheme = "https" }} +{{ end }} +{{ $mountPath := .Values.mountPath }} +{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} +{{ $subPath := .Values.persistence.subPath }} +{{ $penabled := .Values.persistence.enabled }} +{{ $accessMode := .Values.persistence.accessMode }} +{{ $storageClass := .Values.persistence.storageClass }} +{{ $psize := .Values.persistence.size }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "minio.fullname" . }}-svc + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: {{ $scheme }} + port: {{ .Values.service.port }} + protocol: TCP + selector: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +--- +apiVersion: {{ template "minio.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + chart: {{ template "minio.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} +{{- end }} +{{- if .Values.additionalAnnotations }} + annotations: +{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + updateStrategy: + type: {{ .Values.StatefulSetUpdate.updateStrategy }} + podManagementPolicy: "Parallel" + serviceName: {{ template "minio.fullname" . }}-svc + replicas: {{ mul $zoneCount $nodeCount }} + selector: + matchLabels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} + template: + metadata: + name: {{ template "minio.fullname" . }} + labels: + app: {{ template "minio.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} +{{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + serviceAccountName: {{ include "minio.serviceAccountName" . | quote }} +{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio -S {{ .Values.certsPath }} server {{- range $i := until $zoneCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{else}}{{ $bucketRoot }}{{end}}{{- end}}{{- template `minio.extraArgs` . }}" ] + volumeMounts: + {{- if $penabled }} + {{- if (gt $drivesPerNode 1) }} + {{- range $i := until $drivesPerNode }} + - name: export-{{ $i }} + mountPath: {{ $mountPath }}-{{ $i }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- else }} + - name: export + mountPath: {{ $mountPath }} + {{- if and $penabled $subPath }} + subPath: {{ $subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- include "minio.tlsKeysVolumeMount" . 
| indent 12 }} + ports: + - name: {{ $scheme }} + containerPort: 9000 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /minio/health/live + port: {{ $scheme }} + scheme: {{ $scheme | upper }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: {{ $scheme }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: {{ $scheme }} + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "minio.secretName" . }} + key: secretkey + {{- range $key, $val := .Values.environment }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end}} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} +{{- include "minio.imagePullSecrets" . | indent 6 }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: minio-user + secret: + secretName: {{ template "minio.secretName" . }} + {{- include "minio.tlsKeysVolume" . | indent 8 }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode}} + - metadata: + name: export-{{ $diskId }} + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} + {{- else }} + - metadata: + name: export + spec: + accessModes: [ {{ $accessMode | quote }} ] + {{- if $storageClass }} + storageClassName: {{ $storageClass }} + {{- end }} + resources: + requests: + storage: {{ $psize }} + {{- end }} +{{- end }} +{{- end }} diff --git a/deployment/helm/milvus/charts/minio/values.yaml b/deployment/helm/milvus/charts/minio/values.yaml new file mode 100644 index 000000000..13510ca47 --- /dev/null +++ b/deployment/helm/milvus/charts/minio/values.yaml @@ -0,0 +1,384 @@ +## Provide a name in place of minio for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## set kubernetes cluster domain where minio is running +## +clusterDomain: cluster.local + +## Set default image, imageTag, and imagePullPolicy. 
mode is used to indicate the +## +image: + repository: minio/minio + tag: RELEASE.2021-02-14T04-01-33Z + pullPolicy: IfNotPresent + +## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio +## client used to create a default bucket). +## +mcImage: + repository: minio/mc + tag: RELEASE.2021-02-14T04-28-06Z + pullPolicy: IfNotPresent + +## Set default image, imageTag, and imagePullPolicy for the `jq` (the JSON +## process used to create secret for prometheus ServiceMonitor). +## +helmKubectlJqImage: + repository: bskim45/helm-kubectl-jq + tag: 3.1.0 + pullPolicy: IfNotPresent + +## minio server mode, i.e. standalone or distributed. +## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +mode: standalone + +## Additional labels to include with deployment or statefulset +additionalLabels: [] + +## Additional annotations to include with deployment or statefulset +additionalAnnotations: [] + +## Additional arguments to pass to minio binary +extraArgs: [] + +## Update strategy for Deployments +DeploymentUpdate: + type: RollingUpdate + maxUnavailable: 0 + maxSurge: 100% + +## Update strategy for StatefulSets +StatefulSetUpdate: + updateStrategy: RollingUpdate + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Set default accesskey, secretkey, Minio config file path, volume mount path and +## number of nodes (only used for Minio distributed mode) +## AccessKey and secretKey is generated when not set +## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +accessKey: "" +secretKey: "" +certsPath: "/etc/minio/certs/" +configPathmc: "/etc/minio/mc/" +mountPath: "/export" + +## Use existing Secret that store following variables: +## +## | Chart var | .data. in Secret | +## |:----------------------|:-------------------------| +## | accessKey | accesskey | +## | secretKey | secretkey | +## | gcsgateway.gcsKeyJson | gcs_key.json | +## | s3gateway.accessKey | awsAccessKeyId | +## | s3gateway.secretKey | awsSecretAccessKey | +## | etcd.clientCert | etcd_client_cert.pem | +## | etcd.clientCertKey | etcd_client_cert_key.pem | +## +## All mentioned variables will be ignored in values file. +## .data.accesskey and .data.secretkey are mandatory, +## others depend on enabled status of corresponding sections. +existingSecret: "" + +## Override the root directory which the minio server should serve from. +## If left empty, it defaults to the value of {{ .Values.mountPath }} +## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} +bucketRoot: "" + +# Number of drives attached to a node +drivesPerNode: 1 +# Number of MinIO containers running +replicas: 4 +# Number of expanded MinIO clusters +zones: 1 + +## TLS Settings for Minio +tls: + enabled: false + ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret + certSecret: "" + publicCrt: public.crt + privateKey: private.key + +## Trusted Certificates Settings for Minio. Ref: https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls#install-certificates-from-third-party-cas +## Bundle multiple trusted certificates into one secret and pass that here. 
Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret +## When using self-signed certificates, remember to include Minio's own certificate in the bundle with key public.crt. +## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. +trustedCertsSecret: "" + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + annotations: + helm.sh/resource-policy: "keep" + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## minio data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + ## Storage class of PV to bind. By default it looks for standard storage class. + ## If the PV uses a different storage class, specify that here. + storageClass: "" + VolumeName: "" + accessMode: ReadWriteOnce + size: 500Gi + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + ## + subPath: "" + +## Expose the Minio service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## + +service: + type: ClusterIP + clusterIP: ~ + port: 9000 + nodePort: 32000 + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + # - externalIp1 + + annotations: {} + # prometheus.io/scrape: 'true' + # prometheus.io/path: '/minio/prometheus/metrics' + # prometheus.io/port: '9000' + +## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## + +imagePullSecrets: [] +# - name: "image-pull-secret" + +ingress: + enabled: false + labels: {} + # node-role.kubernetes.io/ingress: platform + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.allow-http: "false" + # kubernetes.io/ingress.global-static-ip-name: "" + # nginx.ingress.kubernetes.io/secure-backends: "true" + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +tolerations: [] +affinity: {} + +## Add stateful containers to have security context, if enabled MinIO will run as this +## user and group NOTE: securityContext is only enabled if persistence.enabled=true +securityContext: + enabled: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + +# Additational pod annotations +podAnnotations: {} + +# Additional pod labels +podLabels: {} + +## Configure resource requests and limits +## ref: 
http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 4Gi + +## Create a bucket after minio install +## +defaultBucket: + enabled: false + ## If enabled, must be a string with length > 0 + name: bucket + ## Can be one of none|download|upload|public + policy: none + ## Purge if bucket exists already + purge: false + ## set versioning for bucket true|false + # versioning: false + +## Create multiple buckets after minio install +## Enabling `defaultBucket` will take priority over this list +## +buckets: [] + # - name: bucket1 + # policy: none + # purge: false + # - name: bucket2 + # policy: none + # purge: false + +## Additional Annotations for the Kubernetes Batch (make-bucket-job) +makeBucketJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + resources: + requests: + memory: 128Mi + +## Additional Annotations for the Kubernetes Batch (update-prometheus-secret) +updatePrometheusJob: + podAnnotations: + annotations: + securityContext: + enabled: false + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + +s3gateway: + enabled: false + replicas: 4 + serviceEndpoint: "" + accessKey: "" + secretKey: "" + +## Use minio as an azure blob gateway, you should disable data persistence so no volume claim are created. +## https://docs.minio.io/docs/minio-gateway-for-azure +azuregateway: + enabled: false + # Number of parallel instances + replicas: 4 + +## Use minio as GCS (Google Cloud Storage) gateway, you should disable data persistence so no volume claim are created. +## https://docs.minio.io/docs/minio-gateway-for-gcs + +gcsgateway: + enabled: false + # Number of parallel instances + replicas: 4 + # credential json file of service account key + gcsKeyJson: "" + # Google cloud project-id + projectId: "" + +## Use minio on NAS backend +## https://docs.minio.io/docs/minio-gateway-for-nas + +nasgateway: + enabled: false + # Number of parallel instances + replicas: 4 + # For NAS Gateway, you may want to bind the PVC to a specific PV. To ensure that happens, PV to bind to should have + # a label like "pv: ", use value here. + pv: ~ + +## Use this field to add environment variables relevant to Minio server. These fields will be passed on to Minio container(s) +## when Chart is deployed +environment: {} + ## Please refer for comprehensive list https://docs.minio.io/docs/minio-server-configuration-guide.html + ## MINIO_DOMAIN: "chart-example.local" + ## MINIO_BROWSER: "off" + +networkPolicy: + enabled: false + allowExternal: true + +## PodDisruptionBudget settings +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +## Specify the service account to use for the Minio pods. If 'create' is set to 'false' +## and 'name' is left unspecified, the account 'default' will be used. +serviceAccount: + create: true + ## The name of the service account to use. If 'create' is 'true', a service account with that name + ## will be created. Otherwise, a name will be auto-generated. + name: + +metrics: + # Metrics can not be disabled yet: https://github.com/minio/minio/issues/7493 + serviceMonitor: + enabled: false + additionalLabels: {} + relabelConfigs: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + podMonitor: + enabled: false + # interval: 30s + # scrapeTimeout: 10s + +## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md +## Define endpoints to enable this section. 
+etcd: + endpoints: [] + pathPrefix: "" + corednsPathPrefix: "" + clientCert: "" + clientCertKey: "" + +## Configure extra options for liveness probe +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for readiness probe +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for startupProbe probe +startupProbe: + enabled: true + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 60 diff --git a/deployment/helm/milvus/charts/pulsar/.helmignore b/deployment/helm/milvus/charts/pulsar/.helmignore new file mode 100644 index 000000000..b88e75f8e --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/.helmignore @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deployment/helm/milvus/charts/pulsar/Chart.lock b/deployment/helm/milvus/charts/pulsar/Chart.lock new file mode 100644 index 000000000..a460bfbad --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kube-prometheus-stack + repository: https://prometheus-community.github.io/helm-charts + version: 56.9.0 +digest: sha256:4d83a1bf6fe9b53b8bca45bb240d2e4c1979e068cdc67af53cdadc0885093dcf +generated: "2024-02-23T21:26:50.977495+02:00" diff --git a/deployment/helm/milvus/charts/pulsar/Chart.yaml b/deployment/helm/milvus/charts/pulsar/Chart.yaml new file mode 100644 index 000000000..024d7f864 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +appVersion: 3.0.2 +dependencies: +- condition: kube-prometheus-stack.enabled + name: kube-prometheus-stack + repository: https://prometheus-community.github.io/helm-charts + version: 56.x.x +description: Apache Pulsar Helm chart for Kubernetes +home: https://pulsar.apache.org +icon: https://pulsar.apache.org/img/pulsar.svg +kubeVersion: '>=1.21.0-0' +maintainers: +- email: dev@pulsar.apache.org + name: The Apache Pulsar Team +name: pulsar +sources: +- https://github.com/apache/pulsar +- https://github.com/apache/pulsar-helm-chart +version: 3.3.0 diff --git a/deployment/helm/milvus/charts/pulsar/LICENSE b/deployment/helm/milvus/charts/pulsar/LICENSE new file mode 100644 index 000000000..69f37805e --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/LICENSE @@ -0,0 +1,239 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +---------------------------------------------------------------------------------------------------- + +pulsar-common/src/main/java/org/apache/pulsar/common/util/protobuf/ByteBufCoded{Input,Output}Stream.java + +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/deployment/helm/milvus/charts/pulsar/NOTICE b/deployment/helm/milvus/charts/pulsar/NOTICE new file mode 100644 index 000000000..bbbe4fab8 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/NOTICE @@ -0,0 +1,5 @@ +Apache Pulsar +Copyright 2017-2022 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/.helmignore b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/.helmignore new file mode 100644 index 000000000..1937f42c7 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/.helmignore @@ -0,0 +1,28 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# helm/charts +OWNERS +hack/ +ci/ +kube-prometheus-*.tgz + +unittests/ diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/CONTRIBUTING.md b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/CONTRIBUTING.md new file mode 100644 index 000000000..f6ce2a323 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contributing Guidelines + +## How to contribute to this chart + +1. Fork this repository, develop and test your Chart. +1. Bump the chart version for every change. +1. Ensure PR title has the prefix `[kube-prometheus-stack]` +1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories +1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes. +1. Check for changes of RBAC rules. +1. Check for changes in CRD specs. +1. 
PR must pass the linter (`helm lint`) diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/Chart.lock b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/Chart.lock new file mode 100644 index 000000000..43788fa68 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/Chart.lock @@ -0,0 +1,18 @@ +dependencies: +- name: crds + repository: "" + version: 0.0.0 +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.16.0 +- name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.30.2 +- name: grafana + repository: https://grafana.github.io/helm-charts + version: 7.3.1 +- name: prometheus-windows-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 0.3.1 +digest: sha256:1c7cb74fde9c43f6d2529e9b6001621c72649a8cb1cf00e02e20e24f291c436d +generated: "2024-02-22T12:16:23.259801305Z" diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/Chart.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/Chart.yaml new file mode 100644 index 000000000..345050320 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/Chart.yaml @@ -0,0 +1,65 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts + - name: Upstream Project + url: https://github.com/prometheus-operator/kube-prometheus + artifacthub.io/operator: "true" +apiVersion: v2 +appVersion: v0.71.2 +dependencies: +- condition: crds.enabled + name: crds + repository: "" + version: 0.0.0 +- condition: kubeStateMetrics.enabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.16.* +- condition: nodeExporter.enabled + name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.30.* +- condition: grafana.enabled + name: grafana + repository: https://grafana.github.io/helm-charts + version: 7.3.* +- condition: windowsMonitoring.enabled + name: prometheus-windows-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 0.3.* +description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards, + and Prometheus rules combined with documentation and scripts to provide easy to + operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus + Operator. 
+home: https://github.com/prometheus-operator/kube-prometheus +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +keywords: +- operator +- prometheus +- kube-prometheus +kubeVersion: '>=1.19.0-0' +maintainers: +- email: andrew@quadcorps.co.uk + name: andrewgkew +- email: gianrubio@gmail.com + name: gianrubio +- email: github.gkarthiks@gmail.com + name: gkarthiks +- email: kube-prometheus-stack@sisti.pt + name: GMartinez-Sisti +- email: github@jkroepke.de + name: jkroepke +- email: scott@r6by.com + name: scottrigby +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: quentin.bisson@gmail.com + name: QuentinBisson +name: kube-prometheus-stack +sources: +- https://github.com/prometheus-community/helm-charts +- https://github.com/prometheus-operator/kube-prometheus +type: application +version: 56.9.0 diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/README.md b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/README.md new file mode 100644 index 000000000..1b9ad7cd8 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/README.md @@ -0,0 +1,1029 @@ +# kube-prometheus-stack + +Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). + +See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts. + +_Note: This chart was formerly named `prometheus-operator` chart, now renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._ + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3+ + +## Get Helm Repository Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Helm Chart + +```console +helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [prometheus-community/kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) +- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) +- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana) + +To disable dependencies during installation, see [multiple releases](#multiple-releases) below. + +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Helm Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +CRDs created by this chart are not removed by default and should be manually cleaned up: + +```console +kubectl delete crd alertmanagerconfigs.monitoring.coreos.com +kubectl delete crd alertmanagers.monitoring.coreos.com +kubectl delete crd podmonitors.monitoring.coreos.com +kubectl delete crd probes.monitoring.coreos.com +kubectl delete crd prometheusagents.monitoring.coreos.com +kubectl delete crd prometheuses.monitoring.coreos.com +kubectl delete crd prometheusrules.monitoring.coreos.com +kubectl delete crd scrapeconfigs.monitoring.coreos.com +kubectl delete crd servicemonitors.monitoring.coreos.com +kubectl delete crd thanosrulers.monitoring.coreos.com +``` + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack +``` + +With Helm v3, CRDs created by this chart are not updated by default and should be manually updated. +Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions). + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. + +### From 55.x to 56.x + +This version upgrades Prometheus-Operator to v0.71.0, Prometheus to 2.49.1 + +Run these commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 54.x to 55.x + +This version upgrades Prometheus-Operator to v0.70.0 + +Run these 
commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 53.x to 54.x + +Grafana Helm Chart has bumped to version 7 + +Please note Grafana Helm Chart [changelog](https://github.com/grafana/helm-charts/tree/main/charts/grafana#to-700). + +### From 52.x to 53.x + +This version upgrades Prometheus-Operator to v0.69.1, Prometheus to 2.47.2 + +Run these commands to update the CRDs before applying the upgrade. 
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 51.x to 52.x + +This includes the ability to select between using existing secrets or create new secret objects for various thanos config. 
The defaults have not changed but if you were setting: + +- `thanosRuler.thanosRulerSpec.alertmanagersConfig` or +- `thanosRuler.thanosRulerSpec.objectStorageConfig` or +- `thanosRuler.thanosRulerSpec.queryConfig` or +- `prometheus.prometheusSpec.thanos.objectStorageConfig` + +you will have to need to set `existingSecret` or `secret` based on your requirement + +For instance, the `thanosRuler.thanosRulerSpec.alertmanagersConfig` used to be configured as follow: + +```yaml +thanosRuler: + thanosRulerSpec: + alertmanagersConfig: + alertmanagers: + - api_version: v2 + http_config: + basic_auth: + username: some_user + password: some_pass + static_configs: + - alertmanager.thanos.io + scheme: http + timeout: 10s +``` + +But it now moved to: + +```yaml +thanosRuler: + thanosRulerSpec: + alertmanagersConfig: + secret: + alertmanagers: + - api_version: v2 + http_config: + basic_auth: + username: some_user + password: some_pass + static_configs: + - alertmanager.thanos.io + scheme: http + timeout: 10s +``` + +or the `thanosRuler.thanosRulerSpec.objectStorageConfig` used to be configured as follow: + +```yaml +thanosRuler: + thanosRulerSpec: + objectStorageConfig: + name: existing-secret-not-created-by-this-chart + key: object-storage-configs.yaml +``` + +But it now moved to: + +```yaml +thanosRuler: + thanosRulerSpec: + objectStorageConfig: + existingSecret: + name: existing-secret-not-created-by-this-chart + key: object-storage-configs.yaml +``` + +### From 50.x to 51.x + +This version upgrades Prometheus-Operator to v0.68.0, Prometheus to 2.47.0 and Thanos to v0.32.2 + +Run these commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + 
+### From 49.x to 50.x
+
+This version requires Kubernetes 1.19+.
+
+We do not expect any breaking changes in this version.
+
+### From 48.x to 49.x
+
+This version upgrades Prometheus-Operator to v0.67.1, Alertmanager to v0.26.0, Prometheus to 2.46.0 and Thanos to v0.32.0
+
+Run these commands to update the CRDs before applying the upgrade.
+
+```console
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.67.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 47.x to 48.x
+
+This version moved all CRDs into a dedicated sub-chart. No new CRDs are introduced in this version.
+See [#3548](https://github.com/prometheus-community/helm-charts/issues/3548) for more context.
+
+We do not expect any breaking changes in this version.
+
+### From 46.x to 47.x
+
+This version upgrades Prometheus-Operator to v0.66.0 with new CRDs (PrometheusAgent and ScrapeConfig).
+
+Run these commands to update the CRDs before applying the upgrade.
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 45.x to 46.x + +This version upgrades Prometheus-Operator to v0.65.1 with new CRDs (PrometheusAgent and ScrapeConfig), Prometheus to v2.44.0 and Thanos to v0.31.0. + +Run these commands to update the CRDs before applying the upgrade. 
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 44.x to 45.x + +This version upgrades Prometheus-Operator to v0.63.0, Prometheus to v2.42.0 and Thanos to v0.30.2. + +Run these commands to update the CRDs before applying the upgrade. 
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 43.x to 44.x + +This version upgrades Prometheus-Operator to v0.62.0, Prometheus to v2.41.0 and Thanos to v0.30.1. + +Run these commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +If you have explicitly set `prometheusOperator.admissionWebhooks.failurePolicy`, this value is now always used even when `.prometheusOperator.admissionWebhooks.patch.enabled` is `true` (the default). 
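+
+For example, a minimal values override touching only the keys mentioned above might look like the sketch below (the `Ignore` value is purely illustrative; choose the failure policy appropriate for your cluster):
+
+```yaml
+prometheusOperator:
+  admissionWebhooks:
+    # This value is now always honoured, even while the patch job is enabled.
+    failurePolicy: Ignore
+    patch:
+      enabled: true
+```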
+ +The values for `prometheusOperator.image.tag` & `prometheusOperator.prometheusConfigReloader.image.tag` are now empty by default and the Chart.yaml `appVersion` field is used instead. + +### From 42.x to 43.x + +This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0. + +Run these commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 41.x to 42.x + +This includes the overridability of container registry for all containers at the global level using `global.imageRegistry` or per container image. The defaults have not changed but if you were using a custom image, you will have to override the registry of said custom container image before you upgrade. + +For instance, the prometheus-config-reloader used to be configured as follow: + +```yaml + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.60.1 + sha: "" +``` + +But it now moved to: + +```yaml + image: + registry: quay.io + repository: prometheus-operator/prometheus-config-reloader + tag: v0.60.1 + sha: "" +``` + +### From 40.x to 41.x + +This version upgrades Prometheus-Operator to v0.60.1, Prometheus to v2.39.1 and Thanos to v0.28.1. +This version also upgrades the Helm charts of kube-state-metrics to 4.20.2, prometheus-node-exporter to 4.3.0 and Grafana to 6.40.4. + +Run these commands to update the CRDs before applying the upgrade. 
+
+```console
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+This version splits kubeScheduler recording and alerting rules in separate config values.
+Instead of `defaultRules.rules.kubeScheduler` the 2 new variables `defaultRules.rules.kubeSchedulerAlerting` and `defaultRules.rules.kubeSchedulerRecording` are used.
+
+### From 39.x to 40.x
+
+This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0.
+This version also upgrades the Helm charts of kube-state-metrics to 4.18.0 and prometheus-node-exporter to 4.2.0.
+
+Run these commands to update the CRDs before applying the upgrade.
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +Starting from prometheus-node-exporter version 4.0.0, the `node exporter` chart is using the [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). Therefore you have to delete the daemonset before you upgrade. + +```console +kubectl delete daemonset -l app=prometheus-node-exporter +helm upgrade -i kube-prometheus-stack prometheus-community/kube-prometheus-stack +``` + +If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#podmonitor), please ensure to upgrade their `selector` fields accordingly to the new labels. + +### From 38.x to 39.x + +This upgraded prometheus-operator to v0.58.0 and prometheus to v2.37.0 + +Run these commands to update the CRDs before applying the upgrade. 
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 37.x to 38.x + +Reverted one of the default metrics relabelings for cAdvisor added in 36.x, due to it breaking container_network_* and various other statistics. If you do not want this change, you will need to override the `kubelet.cAdvisorMetricRelabelings`. + +### From 36.x to 37.x + +This includes some default metric relabelings for cAdvisor and apiserver metrics to reduce cardinality. If you do not want these defaults, you will need to override the `kubeApiServer.metricRelabelings` and or `kubelet.cAdvisorMetricRelabelings`. + +### From 35.x to 36.x + +This upgraded prometheus-operator to v0.57.0 and prometheus to v2.36.1 + +Run these commands to update the CRDs before applying the upgrade. 
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 34.x to 35.x + +This upgraded prometheus-operator to v0.56.0 and prometheus to v2.35.0 + +Run these commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 33.x to 34.x + +This upgrades to prometheus-operator to v0.55.0 and prometheus to v2.33.5. + +Run these commands to update the CRDs before applying the upgrade. 
+
+```console
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 32.x to 33.x
+
+This upgrades the prometheus-node-exporter Chart to v3.0.0. Please review the changes to this subchart if you make customizations to hostMountPropagation.
+
+### From 31.x to 32.x
+
+This upgrades prometheus-operator to v0.54.0 and prometheus to v2.33.1. It also changes the default for `grafana.serviceMonitor.enabled` to `true`.
+
+Run these commands to update the CRDs before applying the upgrade.
+
+```console
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 30.x to 31.x
+
+This version removes the built-in grafana ServiceMonitor and instead relies on the ServiceMonitor of the sub-chart.
+`grafana.serviceMonitor.enabled` must be set instead of `grafana.serviceMonitor.selfMonitor`, and the old ServiceMonitor may
+need to be manually cleaned up after deploying the new release.
+
+### From 29.x to 30.x
+
+This version updates kube-state-metrics to 4.3.0 and uses the new option `kube-state-metrics.releaseLabel=true`, which adds the "release" label to kube-state-metrics labels, making scraping of the metrics by kube-prometheus-stack work out of the box again, independent of the kube-prometheus-stack release name used. If you already set the "release" label via `kube-state-metrics.customLabels` you might have to remove that and use it via the new option.
+
+### From 28.x to 29.x
+
+This version makes the scraping port for kube-controller-manager and kube-scheduler dynamic to reflect changes to the default serving ports
+for those components in Kubernetes versions v1.22 and v1.23 respectively.
+
+If you deploy on clusters using version v1.22+, kube-controller-manager will be scraped over HTTPS on port 10257.
+
+If you deploy on clusters running version v1.23+, kube-scheduler will be scraped over HTTPS on port 10259.
+
+### From 27.x to 28.x
+
+This version disables PodSecurityPolicies by default because they are deprecated in Kubernetes 1.21 and will be removed in Kubernetes 1.25.
+
+If you are using PodSecurityPolicies, you can enable the previous behaviour by setting `kube-state-metrics.podSecurityPolicy.enabled`, `prometheus-node-exporter.rbac.pspEnabled`, `grafana.rbac.pspEnabled` and `global.rbac.pspEnabled` to `true`.
+
+### From 26.x to 27.x
+
+This version splits the prometheus-node-exporter chart's recording and alerting rules into separate config values.
+Instead of `defaultRules.rules.node` the 2 new variables `defaultRules.rules.nodeExporterAlerting` and `defaultRules.rules.nodeExporterRecording` are used.
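+
+For example, a `values.yaml` override that keeps both rule groups enabled might look like this (a minimal sketch; the `true` values are only illustrative):
+
+```yaml
+defaultRules:
+  rules:
+    # replaces the former defaultRules.rules.node toggle
+    nodeExporterAlerting: true
+    nodeExporterRecording: true
+```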
+
+Also the following defaultRules.rules have been removed as they had no effect: `kubeApiserverError`, `kubePrometheusNodeAlerting`, `kubernetesAbsent`, `time`.
+
+The ability to set a runbookUrl via `defaultRules.rules.runbookUrl` was reintroduced.
+
+### From 25.x to 26.x
+
+This version enables the prometheus-node-exporter subchart servicemonitor by default again, by setting `prometheus-node-exporter.prometheus.monitor.enabled` to `true`.
+
+### From 24.x to 25.x
+
+This version upgrades to prometheus-operator v0.53.1. It removes support for setting a runbookUrl, since the upstream format for runbooks changed.
+
+Run these commands to update the CRDs before applying the upgrade.
+
+```console
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 23.x to 24.x
+
+The custom `ServiceMonitor` objects for the _kube-state-metrics_ & _prometheus-node-exporter_ charts have been removed in favour of the built-in sub-chart `ServiceMonitor`; for both sub-charts this means that `ServiceMonitor` customisations happen via the values passed to the chart. If you haven't directly customised this behaviour then there are no changes required to upgrade, but if you have, please read the following.
+
+For _kube-state-metrics_ the `ServiceMonitor` customisation is now set via `kube-state-metrics.prometheus.monitor` and the `kubeStateMetrics.serviceMonitor.selfMonitor.enabled` value has moved to `kube-state-metrics.selfMonitor.enabled`.
+
+For _prometheus-node-exporter_ the `ServiceMonitor` customisation is now set via `prometheus-node-exporter.prometheus.monitor` and the `nodeExporter.jobLabel` value has moved to `prometheus-node-exporter.prometheus.monitor.jobLabel`.
+
+### From 22.x to 23.x
+
+Port names have been renamed for Istio's
+[explicit protocol selection](https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/#explicit-protocol-selection).
+
+| Setting | old value | new value |
+|---------|-----------|-----------|
+| `alertmanager.alertmanagerSpec.portName` | `web` | `http-web` |
+| `grafana.service.portName` | `service` | `http-web` |
+| `prometheus-node-exporter.service.portName` | `metrics` (hardcoded) | `http-metrics` |
+| `prometheus.prometheusSpec.portName` | `web` | `http-web` |
+
+### From 21.x to 22.x
+
+Due to the upgrade of the `kube-state-metrics` chart, removal of its deployment/statefulset needs to be done manually prior to upgrading:
+
+```console
+kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus-operator,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
+```
+
+or if you use autosharding:
+
+```console
+kubectl delete statefulsets.apps -l app.kubernetes.io/instance=prometheus-operator,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
+```
+
+### From 20.x to 21.x
+
+The config reloader values have been refactored. All the values have been moved to the key `prometheusConfigReloader` and the limits and requests can now be set separately.
+
+### From 19.x to 20.x
+
+Version 20 upgrades prometheus-operator from 0.50.x to 0.52.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:
+
+```console
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
+```
+
+### From 18.x to 19.x
+
+`kubeStateMetrics.serviceMonitor.namespaceOverride` was removed.
+Please use `kube-state-metrics.namespaceOverride` instead.
+
+### From 17.x to 18.x
+
+Version 18 upgrades prometheus-operator from 0.49.x to 0.50.x.
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 16.x to 17.x + +Version 17 upgrades prometheus-operator from 0.48.x to 0.49.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 15.x to 16.x + +Version 16 upgrades kube-state-metrics to v2.0.0. This includes changed command-line arguments and removed metrics, see this [blog post](https://kubernetes.io/blog/2021/04/13/kube-state-metrics-v-2-0/). This version also removes Grafana dashboards that supported Kubernetes 1.14 or earlier. 
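+
+To check which kube-state-metrics version is currently running before the upgrade, an illustrative (not chart-specific) command is:
+
+```console
+kubectl get deployments --all-namespaces -l app.kubernetes.io/name=kube-state-metrics \
+  -o jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.spec.template.spec.containers[0].image}{"\n"}{end}'
+```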
+ +### From 14.x to 15.x + +Version 15 upgrades prometheus-operator from 0.46.x to 0.47.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 13.x to 14.x + +Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 12.x to 13.x + +Version 13 upgrades prometheus-operator from 0.44.x to 0.45.x. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +``` + +### From 11.x to 12.x + +Version 12 upgrades prometheus-operator from 0.43.x to 0.44.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.44/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +``` + +The chart was migrated to support only helm v3 and later. + +### From 10.x to 11.x + +Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +``` + +Version 11 removes the deprecated tlsProxy via ghostunnel in favor of native TLS support the prometheus-operator gained with v0.39.0. + +### From 9.x to 10.x + +Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +``` + +### From 8.x to 9.x + +Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration. + +### From 7.x to 8.x + +Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x + +```console +helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0 +``` + +Then upgrade to 8.x.x + +```console +helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x] +``` + +Minimal recommended Prometheus version for this chart release is `2.12.x` + +### From 6.x to 7.x + +Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0. + +### From 5.x to 6.x + +Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. 
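+
+For example, using the same repository alias and release-name placeholder as the commands in the earlier sections (adjust to your environment):
+
+```console
+helm upgrade --force [RELEASE_NAME] prometheus-community/kube-prometheus-stack
+```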
+
+If this is not done an error will occur indicating that the deployment cannot be modified:
+
+```console
+invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
+```
+
+If this error has already been encountered, run `helm history` to determine which release last worked, then `helm rollback` to that release, and then `helm upgrade --force` to this new one.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:
+
+```console
+helm show values prometheus-community/kube-prometheus-stack
+```
+
+You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
+
+### Multiple releases
+
+The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.
+
+## Work-Arounds for Known Issues
+
+### Running on private GKE clusters
+
+When Google configures the control plane for private clusters, it automatically configures VPC peering between your Kubernetes cluster’s network and a separate Google-managed project. In order to restrict what Google is able to access within your cluster, the firewall rules configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.
+
+You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).
+
+Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.
+
+## PrometheusRules Admission Webhooks
+
+With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.
+
+### How the Chart Configures the Hooks
+
+A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
+
+1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
+2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
+3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
+4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations.
+
+### Alternatives
+
+It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.
+
+You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `prometheusOperator.admissionWebhooks.certManager.enabled` value to `true`.
+
+### Limitations
+
+Because the operator can only run as a single pod, there is potential for a failure of this component to cause rule deployment failures. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.
+
+## Developing Prometheus Rules and Grafana Dashboards
+
+This chart's Grafana dashboards and Prometheus rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repository](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here with the scripts.
+
+## Further Information
+
+For more in-depth documentation of the configuration options' meanings, please see:
+
+- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
+- [Prometheus](https://prometheus.io/docs/introduction/overview/)
+- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)
+
+## prometheus.io/scrape
+
+The prometheus operator does not support annotation-based discovery of services, using the `PodMonitor` or `ServiceMonitor` CRD in its place as they provide far more configuration options.
+For information on how to use PodMonitors/ServiceMonitors, please see the `prometheus-operator/prometheus-operator` documentation here:
+
+- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#include-servicemonitors)
+- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#include-podmonitors)
+- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/running-exporters.md)
+
+By default, Prometheus discovers PodMonitors and ServiceMonitors within its namespace that are labeled with the same release tag as the prometheus-operator release.
+Sometimes, you may need to discover custom PodMonitors/ServiceMonitors, for example ones used to scrape data from third-party applications.
+An easy way of doing this, without compromising the default PodMonitors/ServiceMonitors discovery, is allowing Prometheus to discover all PodMonitors/ServiceMonitors within its namespace, without applying label filtering.
+
+To do so, you can set `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
+
+## Migrating from stable/prometheus-operator chart
+
+## Zero downtime
+
+Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved.
+However, the old name prefix needs to be kept. If you want the new name, please follow the step-by-step guide below (with downtime).
+
+You can override the name to achieve this:
+
+```console
+helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator
+```
+
+**Note**: It is recommended to run this first with `--dry-run --debug`.
+
+## Redeploy with new name (downtime)
+
+If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the below steps for migration:
+
+> The guide presumes that the chart is deployed in the `monitoring` namespace and the deployments are running there. If it is in another namespace, please replace `monitoring` with the deployed namespace.
+
+1. Patch the PersistentVolume created/used by the prometheus-operator chart to the `Retain` claim policy:
+
+   ```console
+   kubectl patch pv/ -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+   ```
+
+   **Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
+
+2. Uninstall the **prometheus-operator** release and delete the existing PersistentVolumeClaim, and verify that the PV becomes Released.
+
+   ```console
+   helm uninstall prometheus-operator -n monitoring
+   kubectl delete pvc/ -n monitoring
+   ```
+
+   Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service.
+
+   ```console
+   kubectl delete service/prometheus-operator-kubelet -n kube-system
+   ```
+
+   You can choose to remove all your existing CRDs (ServiceMonitors, PodMonitors, etc.) if you want to.
+
+3. Remove the current `spec.claimRef` values to change the PV's status from Released to Available.
+
+   ```console
+   kubectl patch pv/ --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring
+   ```
+
+**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
+
+After these steps, proceed to a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in the `values.yaml`.
+
+The binding is done by matching the specific amount of storage requested and the access modes.
+
+For example, if you had storage specified as follows with **prometheus-operator**:
+
+```yaml
+volumeClaimTemplate:
+  spec:
+    storageClassName: gp2
+    accessModes: ["ReadWriteOnce"]
+    resources:
+      requests:
+        storage: 50Gi
+```
+
+You have to specify a matching `volumeClaimTemplate` with 50Gi storage and the `ReadWriteOnce` access mode.
+
+Additionally, you should check the current AZ of your legacy installation's PV, and configure the fresh release to use the same AZ as the old one. If the pods are in a different AZ than the PV, the release will fail to bind the existing one, hence creating a new PV.
+
+This can be achieved either by specifying the labels through `values.yaml`, e.g.
setting `prometheus.prometheusSpec.nodeSelector` to: + +```yaml +nodeSelector: + failure-domain.beta.kubernetes.io/zone: east-west-1a +``` + +or passing these values as `--set` overrides during installation. + +The new release should now re-attach your previously released PV with its content. + +## Migrating from coreos/prometheus-operator chart + +The multiple charts have been combined into a single chart that installs prometheus operator, prometheus, alertmanager, grafana as well as the multitude of exporters necessary to monitor a cluster. + +There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support. + +The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy. + +You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765). + +### High-level overview of Changes + +#### Added dependencies + +The chart has added 3 [dependencies](#dependencies). + +- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components +- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md) + +#### Kubelet Service + +Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice. + +#### Persistent Volumes + +If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pvc-prometheus-migration-prometheus-0 +spec: + accessModes: + - ReadWriteOnce + azureDisk: + cachingMode: None + diskName: pvc-prometheus-migration-prometheus-0 + diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0 + fsType: "" + kind: Managed + readOnly: false + capacity: + storage: 1Gi + persistentVolumeReclaimPolicy: Delete + storageClassName: prometheus + volumeMode: Filesystem +``` + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/name: prometheus + prometheus: prometheus-migration-prometheus + name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0 + namespace: monitoring +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: prometheus + volumeMode: Filesystem + volumeName: pvc-prometheus-migration-prometheus-0 +``` + +The PVC will take ownership of the PV and when you create a release using a persistent volume claim template it will use the existing PVCs as they match the naming convention used by the chart. 
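+
+To confirm that the claim binds to the pre-created volume, a minimal sketch (the manifest file names are illustrative) is:
+
+```console
+kubectl apply -f pv-prometheus-migration.yaml -f pvc-prometheus-migration.yaml
+kubectl get pvc -n monitoring
+```
+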
For other cloud providers similar approaches can be used. + +#### KubeProxy + +The metrics bind address of kube-proxy is default to `127.0.0.1:10249` that prometheus instances **cannot** access to. You should expose metrics by changing `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them. + +Depending on the cluster, the relevant part `config.conf` will be in ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example: + +```console +kubectl -n kube-system edit cm kube-proxy +``` + +```yaml +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + # ... + # metricsBindAddress: 127.0.0.1:10249 + metricsBindAddress: 0.0.0.0:10249 + # ... + kubeconfig.conf: |- + # ... +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system +``` diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/Chart.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/Chart.yaml new file mode 100644 index 000000000..adb9e4a5d --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/Chart.yaml @@ -0,0 +1,3 @@ +apiVersion: v2 +name: crds +version: 0.0.0 diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/README.md b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/README.md new file mode 100644 index 000000000..02092b964 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/README.md @@ -0,0 +1,3 @@ +# crds subchart + +See: [https://github.com/prometheus-community/helm-charts/issues/3548](https://github.com/prometheus-community/helm-charts/issues/3548) diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagerconfigs.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagerconfigs.yaml new file mode 100644 index 000000000..ce64b9bc3 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagerconfigs.yaml @@ -0,0 +1,5722 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.2/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.71.2 + name: alertmanagerconfigs.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: AlertmanagerConfig + listKind: AlertmanagerConfigList + plural: alertmanagerconfigs + shortNames: + - amcfg + singular: alertmanagerconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: AlertmanagerConfig configures the Prometheus Alertmanager, specifying + how alerts should be grouped, inhibited and notified to external systems. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AlertmanagerConfigSpec is a specification of the desired + behavior of the Alertmanager configuration. By definition, the Alertmanager + configuration only applies to alerts for which the `namespace` label + is equal to the namespace of the AlertmanagerConfig resource. + properties: + inhibitRules: + description: List of inhibition rules. The rules will only apply to + alerts matching the resource's namespace. + items: + description: InhibitRule defines an inhibition rule that allows + to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule + properties: + equal: + description: Labels that must have an equal value in the source + and target alert for the inhibition to take effect. + items: + type: string + type: array + sourceMatch: + description: Matchers for which one or more alerts have to exist + for the inhibition to take effect. The operator enforces that + the alert matches the resource's namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager + >= v0.22.0 and takes precedence over Regex (deprecated) + if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: 'Whether to match on equality (false) or + regular-expression (true). Deprecated: for AlertManager + >= v0.22.0, `matchType` should be used instead.' + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + targetMatch: + description: Matchers that have to be fulfilled in the alerts + to be muted. The operator enforces that the alert matches + the resource's namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager + >= v0.22.0 and takes precedence over Regex (deprecated) + if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: 'Whether to match on equality (false) or + regular-expression (true). Deprecated: for AlertManager + >= v0.22.0, `matchType` should be used instead.' + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + type: object + type: array + muteTimeIntervals: + description: List of MuteTimeInterval specifying when the routes should + be muted. 
+ items: + description: MuteTimeInterval specifies the periods in time when + notifications will be muted + properties: + name: + description: Name of the time interval + type: string + timeIntervals: + description: TimeIntervals is a list of TimeInterval + items: + description: TimeInterval describes intervals of time + properties: + daysOfMonth: + description: DaysOfMonth is a list of DayOfMonthRange + items: + description: DayOfMonthRange is an inclusive range of + days of the month beginning at 1 + properties: + end: + description: End of the inclusive range + maximum: 31 + minimum: -31 + type: integer + start: + description: Start of the inclusive range + maximum: 31 + minimum: -31 + type: integer + type: object + type: array + months: + description: Months is a list of MonthRange + items: + description: MonthRange is an inclusive range of months + of the year beginning in January Months can be specified + by name (e.g 'January') by numerical month (e.g '1') + or as an inclusive range (e.g 'January:March', '1:3', + '1:March') + pattern: ^((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12]))$)|$) + type: string + type: array + times: + description: Times is a list of TimeRange + items: + description: TimeRange defines a start and end time + in 24hr format + properties: + endTime: + description: EndTime is the end time in 24hr format. + pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$) + type: string + startTime: + description: StartTime is the start time in 24hr + format. + pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$) + type: string + type: object + type: array + weekdays: + description: Weekdays is a list of WeekdayRange + items: + description: WeekdayRange is an inclusive range of days + of the week beginning on Sunday Days can be specified + by name (e.g 'Sunday') or as an inclusive range (e.g + 'Monday:Friday') + pattern: ^((?i)sun|mon|tues|wednes|thurs|fri|satur)day(?:((:(sun|mon|tues|wednes|thurs|fri|satur)day)$)|$) + type: string + type: array + years: + description: Years is a list of YearRange + items: + description: YearRange is an inclusive range of years + pattern: ^2\d{3}(?::2\d{3}|$) + type: string + type: array + type: object + type: array + type: object + type: array + receivers: + description: List of receivers. + items: + description: Receiver defines one or more notification integrations. + properties: + discordConfigs: + description: List of Discord configurations. + items: + description: DiscordConfig configures notifications via Discord. + See https://prometheus.io/docs/alerting/latest/configuration/#discord_config + properties: + apiURL: + description: The secret's key that contains the Discord + webhook URL. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + httpConfig: + description: HTTP client configuration. 
+ properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' 
+ properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: The template of the message's body. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + title: + description: The template of the message's title. + type: string + required: + - apiURL + type: object + type: array + emailConfigs: + description: List of Email configurations. + items: + description: EmailConfig configures notifications via Email. + properties: + authIdentity: + description: The identity to use for authentication. + type: string + authPassword: + description: The secret's key that contains the password + to use for authentication. The secret needs to be in + the same namespace as the AlertmanagerConfig object + and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authSecret: + description: The secret's key that contains the CRAM-MD5 + secret. 
The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authUsername: + description: The username to use for authentication. + type: string + from: + description: The sender address. + type: string + headers: + description: Further headers email header key/value pairs. + Overrides any headers previously set by the notification + implementation. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + hello: + description: The hostname to identify to the SMTP server. + type: string + html: + description: The HTML body of the email notification. + type: string + requireTLS: + description: The SMTP TLS requirement. Note that Go does + not support unencrypted connections to remote SMTP endpoints. + type: boolean + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + smarthost: + description: The SMTP host and port through which emails + are sent. E.g. example.com:25 + type: string + text: + description: The text body of the email notification. + type: string + tlsConfig: + description: TLS configuration + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing + client-authentication. + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file + for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + to: + description: The email address to send notifications to. + type: string + type: object + type: array + msteamsConfigs: + description: List of MSTeams configurations. It requires Alertmanager + >= 0.26.0. + items: + description: MSTeamsConfig configures notifications via Microsoft + Teams. It requires Alertmanager >= 0.26.0. + properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + sendResolved: + description: Whether to notify about resolved alerts. + type: boolean + text: + description: Message body template. + type: string + title: + description: Message title template. + type: string + webhookUrl: + description: MSTeams webhook URL. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - webhookUrl + type: object + type: array + name: + description: Name of the receiver. Must be unique across all + items from the list. + minLength: 1 + type: string + opsgenieConfigs: + description: List of OpsGenie configurations. + items: + description: OpsGenieConfig configures notifications via OpsGenie. + See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config + properties: + actions: + description: Comma separated list of actions that will + be available for the alert. + type: string + apiKey: + description: The secret's key that contains the OpsGenie + API key. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiURL: + description: The URL to send OpsGenie API requests to. + type: string + description: + description: Description of the incident. + type: string + details: + description: A set of arbitrary key/value pairs that provide + further detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + entity: + description: Optional field that can be used to specify + which domain alert is related to. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. 
+ properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: Alert text limited to 130 characters. + type: string + note: + description: Additional alert note. + type: string + priority: + description: Priority level of alert. Possible values + are P1, P2, P3, P4, and P5. + type: string + responders: + description: List of responders responsible for notifications. + items: + description: OpsGenieConfigResponder defines a responder + to an incident. One of `id`, `name` or `username` + has to be defined. + properties: + id: + description: ID of the responder. + type: string + name: + description: Name of the responder. + type: string + type: + description: Type of responder. + enum: + - team + - teams + - user + - escalation + - schedule + minLength: 1 + type: string + username: + description: Username of the responder. + type: string + required: + - type + type: object + type: array + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + source: + description: Backlink to the sender of the notification. + type: string + tags: + description: Comma separated list of tags attached to + the notifications. + type: string + updateAlerts: + description: Whether to update message and description + of the alert in OpsGenie if it already exists By default, + the alert is never updated in OpsGenie, the new message + only appears in activity log. + type: boolean + type: object + type: array + pagerdutyConfigs: + description: List of PagerDuty configurations. + items: + description: PagerDutyConfig configures notifications via + PagerDuty. See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config + properties: + class: + description: The class/type of the event. + type: string + client: + description: Client identification. 
+ type: string + clientURL: + description: Backlink to the sender of notification. + type: string + component: + description: The part or component of the affected system + that is broken. + type: string + description: + description: Description of the incident. + type: string + details: + description: Arbitrary key/value pairs that provide further + detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + group: + description: A cluster or grouping of sources. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + pagerDutyImageConfigs: + description: A list of image details to attach that provide + further detail about an incident. + items: + description: PagerDutyImageConfig attaches images to + an incident + properties: + alt: + description: Alt is the optional alternative text + for the image. + type: string + href: + description: Optional URL; makes the image a clickable + link. 
+ type: string + src: + description: Src of the image being attached to + the incident + type: string + type: object + type: array + pagerDutyLinkConfigs: + description: A list of link details to attach that provide + further detail about an incident. + items: + description: PagerDutyLinkConfig attaches text links + to an incident + properties: + alt: + description: Text that describes the purpose of + the link, and can be used as the link's text. + type: string + href: + description: Href is the URL of the link to be attached + type: string + type: object + type: array + routingKey: + description: The secret's key that contains the PagerDuty + integration key (when using Events API v2). Either this + field or `serviceKey` needs to be defined. The secret + needs to be in the same namespace as the AlertmanagerConfig + object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + serviceKey: + description: The secret's key that contains the PagerDuty + service key (when using integration type "Prometheus"). + Either this field or `routingKey` needs to be defined. + The secret needs to be in the same namespace as the + AlertmanagerConfig object and accessible by the Prometheus + Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + severity: + description: Severity of the incident. + type: string + url: + description: The URL to send requests to. + type: string + type: object + type: array + pushoverConfigs: + description: List of Pushover configurations. + items: + description: PushoverConfig configures notifications via Pushover. + See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config + properties: + device: + description: The name of a device to send the notification + to + type: string + expire: + description: How long your notification will continue + to be retried for, unless the user acknowledges the + notification. + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + type: string + html: + description: Whether notification message is HTML or plain + text. + type: boolean + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. 
+ properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: Notification message. + type: string + priority: + description: Priority, see https://pushover.net/api#priority + type: string + retry: + description: How often the Pushover servers will send + the same notification to the user. Must be at least + 30 seconds. + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + sound: + description: The name of one of the sounds supported by + device clients to override the user's default sound + choice + type: string + title: + description: Notification title. + type: string + token: + description: The secret's key that contains the registered + application's API token, see https://pushover.net/apps. + The secret needs to be in the same namespace as the + AlertmanagerConfig object and accessible by the Prometheus + Operator. Either `token` or `tokenFile` is required. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + tokenFile: + description: The token file that contains the registered + application's API token, see https://pushover.net/apps. + Either `token` or `tokenFile` is required. It requires + Alertmanager >= v0.26.0. + type: string + url: + description: A supplementary URL shown alongside the message. 
+ type: string + urlTitle: + description: A title for supplementary URL, otherwise + just the URL is shown + type: string + userKey: + description: The secret's key that contains the recipient + user's user key. The secret needs to be in the same + namespace as the AlertmanagerConfig object and accessible + by the Prometheus Operator. Either `userKey` or `userKeyFile` + is required. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + userKeyFile: + description: The user key file that contains the recipient + user's user key. Either `userKey` or `userKeyFile` is + required. It requires Alertmanager >= v0.26.0. + type: string + type: object + type: array + slackConfigs: + description: List of Slack configurations. + items: + description: SlackConfig configures notifications via Slack. + See https://prometheus.io/docs/alerting/latest/configuration/#slack_config + properties: + actions: + description: A list of Slack actions that are sent with + each notification. + items: + description: SlackAction configures a single Slack action + that is sent with each notification. See https://api.slack.com/docs/message-attachments#action_fields + and https://api.slack.com/docs/message-buttons for + more information. + properties: + confirm: + description: SlackConfirmationField protect users + from destructive actions or particularly distinguished + decisions by asking them to confirm their button + click one more time. See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields + for more information. + properties: + dismissText: + type: string + okText: + type: string + text: + minLength: 1 + type: string + title: + type: string + required: + - text + type: object + name: + type: string + style: + type: string + text: + minLength: 1 + type: string + type: + minLength: 1 + type: string + url: + type: string + value: + type: string + required: + - text + - type + type: object + type: array + apiURL: + description: The secret's key that contains the Slack + webhook URL. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + callbackId: + type: string + channel: + description: The channel or user to send notifications + to. + type: string + color: + type: string + fallback: + type: string + fields: + description: A list of Slack fields that are sent with + each notification. + items: + description: SlackField configures a single Slack field + that is sent with each notification. 
Each field must + contain a title, value, and optionally, a boolean + value to indicate if the field is short enough to + be displayed next to other fields designated as short. + See https://api.slack.com/docs/message-attachments#fields + for more information. + properties: + short: + type: boolean + title: + minLength: 1 + type: string + value: + minLength: 1 + type: string + required: + - title + - value + type: object + type: array + footer: + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + iconEmoji: + type: string + iconURL: + type: string + imageURL: + type: string + linkNames: + type: boolean + mrkdwnIn: + items: + type: string + type: array + pretext: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + shortFields: + type: boolean + text: + type: string + thumbURL: + type: string + title: + type: string + titleLink: + type: string + username: + type: string + type: object + type: array + snsConfigs: + description: List of SNS configurations + items: + description: SNSConfig configures notifications via AWS SNS. + See https://prometheus.io/docs/alerting/latest/configuration/#sns_configs + properties: + apiURL: + description: The SNS API URL i.e. 
https://sns.us-east-2.amazonaws.com. + If not specified, the SNS API URL from the SNS SDK will + be used. + type: string + attributes: + additionalProperties: + type: string + description: SNS message attributes. + type: object + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: The message content of the SNS notification. + type: string + phoneNumber: + description: Phone number if message is delivered via + SMS in E.164 format. If you don't specify this value, + you must specify a value for the TopicARN or TargetARN. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + sigv4: + description: Configures AWS's Signature Verification 4 + signing process to sign requests. + properties: + accessKey: + description: AccessKey is the AWS API key. If not + specified, the environment variable `AWS_ACCESS_KEY_ID` + is used. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + profile: + description: Profile is the named AWS profile used + to authenticate. + type: string + region: + description: Region is the AWS region. If blank, the + region from the default credentials chain used. + type: string + roleArn: + description: RoleArn is the named AWS profile used + to authenticate. + type: string + secretKey: + description: SecretKey is the AWS API secret. If not + specified, the environment variable `AWS_SECRET_ACCESS_KEY` + is used. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + subject: + description: Subject line when the message is delivered + to email endpoints. + type: string + targetARN: + description: The mobile platform endpoint ARN if message + is delivered via mobile notifications. If you don't + specify this value, you must specify a value for the + topic_arn or PhoneNumber. + type: string + topicARN: + description: SNS topic ARN, i.e. arn:aws:sns:us-east-2:698519295917:My-Topic + If you don't specify this value, you must specify a + value for the PhoneNumber or TargetARN. + type: string + type: object + type: array + telegramConfigs: + description: List of Telegram configurations. + items: + description: TelegramConfig configures notifications via Telegram. + See https://prometheus.io/docs/alerting/latest/configuration/#telegram_config + properties: + apiURL: + description: The Telegram API URL i.e. https://api.telegram.org. + If not specified, default API URL will be used. + type: string + botToken: + description: "Telegram bot token. It is mutually exclusive + with `botTokenFile`. The secret needs to be in the same + namespace as the AlertmanagerConfig object and accessible + by the Prometheus Operator. \n Either `botToken` or + `botTokenFile` is required." + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + botTokenFile: + description: "File to read the Telegram bot token from. + It is mutually exclusive with `botToken`. Either `botToken` + or `botTokenFile` is required. \n It requires Alertmanager + >= v0.26.0." + type: string + chatID: + description: The Telegram chat ID. + format: int64 + type: integer + disableNotifications: + description: Disable telegram notifications + type: boolean + httpConfig: + description: HTTP client configuration. 
+ properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' 
+ properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: Message template + type: string + parseMode: + description: Parse mode for telegram message + enum: + - MarkdownV2 + - Markdown + - HTML + type: string + sendResolved: + description: Whether to notify about resolved alerts. + type: boolean + type: object + type: array + victoropsConfigs: + description: List of VictorOps configurations. + items: + description: VictorOpsConfig configures notifications via + VictorOps. See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config + properties: + apiKey: + description: The secret's key that contains the API key + to use when talking to the VictorOps API. The secret + needs to be in the same namespace as the AlertmanagerConfig + object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiUrl: + description: The VictorOps API URL. 
+ type: string + customFields: + description: Additional custom fields for notification. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + entityDisplayName: + description: Contains summary of the alerted problem. + type: string + httpConfig: + description: The HTTP client's configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + messageType: + description: Describes the behavior of the alert (CRITICAL, + WARNING, INFO). + type: string + monitoringTool: + description: The monitoring tool the state message is + from. + type: string + routingKey: + description: A key used to map the alert to a team. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + stateMessage: + description: Contains long explanation of the alerted + problem. + type: string + type: object + type: array + webexConfigs: + description: List of Webex configurations. 
+ items: + description: WebexConfig configures notification via Cisco + Webex See https://prometheus.io/docs/alerting/latest/configuration/#webex_config + properties: + apiURL: + description: The Webex Teams API URL i.e. https://webexapis.com/v1/messages + Provide if different from the default API URL. + pattern: ^https?://.+$ + type: string + httpConfig: + description: The HTTP client's configuration. You must + supply the bot token via the `httpConfig.authorization` + field. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: Message template + type: string + roomID: + description: ID of the Webex Teams room where to send + the messages. + minLength: 1 + type: string + sendResolved: + description: Whether to notify about resolved alerts. + type: boolean + required: + - roomID + type: object + type: array + webhookConfigs: + description: List of webhook configurations. + items: + description: WebhookConfig configures notifications via a + generic receiver supporting the webhook payload. See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config + properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. 
This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. 
+ properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + maxAlerts: + description: Maximum number of alerts to be sent per webhook + message. When 0, all alerts are included. + format: int32 + minimum: 0 + type: integer + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + url: + description: The URL to send HTTP POST requests to. `urlSecret` + takes precedence over `url`. One of `urlSecret` and + `url` should be defined. + type: string + urlSecret: + description: The secret's key that contains the webhook + URL to send HTTP requests to. `urlSecret` takes precedence + over `url`. One of `urlSecret` and `url` should be defined. + The secret needs to be in the same namespace as the + AlertmanagerConfig object and accessible by the Prometheus + Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + wechatConfigs: + description: List of WeChat configurations. 
+ items: + description: WeChatConfig configures notifications via WeChat. + See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config + properties: + agentID: + type: string + apiSecret: + description: The secret's key that contains the WeChat + API key. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiURL: + description: The WeChat API URL. + type: string + corpID: + description: The corp id for authentication. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the + namespace that contains the credentials for + authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. + The value is case-insensitive. \n \"Basic\" + is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a + Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a + Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the + client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch + a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a + Secret or ConfigMap containing the OAuth2 client''s + ID.' + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of + a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the + HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes + used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to + fetch the token from.' 
+ minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when + doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: API request data as defined by the WeChat + API. 
+ type: string + messageType: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + toParty: + type: string + toTag: + type: string + toUser: + type: string + type: object + type: array + required: + - name + type: object + type: array + route: + description: The Alertmanager route definition for alerts matching + the resource's namespace. If present, it will be added to the generated + Alertmanager configuration as a first-level route. + properties: + activeTimeIntervals: + description: ActiveTimeIntervals is a list of MuteTimeInterval + names when this route should be active. + items: + type: string + type: array + continue: + description: Boolean indicating whether an alert should continue + matching subsequent sibling nodes. It will always be overridden + to true for the first-level route by the Prometheus operator. + type: boolean + groupBy: + description: List of labels to group by. Labels must not be repeated + (unique list). Special label "..." (aggregate by all possible + labels), if provided, must be the only element in the list. + items: + type: string + type: array + groupInterval: + description: 'How long to wait before sending an updated notification. + Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + Example: "5m"' + type: string + groupWait: + description: 'How long to wait before sending the initial notification. + Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + Example: "30s"' + type: string + matchers: + description: 'List of matchers that the alert''s labels should + match. For the first level route, the operator removes any existing + equality and regexp matcher on the `namespace` label and adds + a `namespace: ` matcher.' + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager + >= v0.22.0 and takes precedence over Regex (deprecated) + if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: 'Whether to match on equality (false) or regular-expression + (true). Deprecated: for AlertManager >= v0.22.0, `matchType` + should be used instead.' + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + muteTimeIntervals: + description: 'Note: this comment applies to the field definition + above but appears below otherwise it gets included in the generated + manifest. CRD schema doesn''t support self-referential types + for now (see https://github.com/kubernetes/kubernetes/issues/62872). + We have to use an alternative type to circumvent the limitation. + The downside is that the Kube API can''t validate the data beyond + the fact that it is a valid JSON representation. MuteTimeIntervals + is a list of MuteTimeInterval names that will mute this route + when matched,' + items: + type: string + type: array + receiver: + description: Name of the receiver for this route. If not empty, + it should be listed in the `receivers` field. + type: string + repeatInterval: + description: 'How long to wait before repeating the last notification. 
+ Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + Example: "4h"' + type: string + routes: + description: Child routes. + items: + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagers.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagers.yaml new file mode 100644 index 000000000..5feaee175 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-alertmanagers.yaml @@ -0,0 +1,7428 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.2/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.71.2 + name: alertmanagers.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Alertmanager + listKind: AlertmanagerList + plural: alertmanagers + shortNames: + - am + singular: alertmanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Alertmanager + jsonPath: .spec.version + name: Version + type: string + - description: The number of desired replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: The number of ready replicas + jsonPath: .status.availableReplicas + name: Ready + type: integer + - jsonPath: .status.conditions[?(@.type == 'Reconciled')].status + name: Reconciled + type: string + - jsonPath: .status.conditions[?(@.type == 'Available')].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Whether the resource reconciliation is paused or not + jsonPath: .status.paused + name: Paused + priority: 1 + type: boolean + name: v1 + schema: + openAPIV3Schema: + description: Alertmanager describes an Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Alertmanager + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalPeers: + description: AdditionalPeers allows injecting a set of additional + Alertmanagers to peer with to form a highly available cluster. + items: + type: string + type: array + affinity: + description: If specified, the pod's scheduling constraints. 
+ properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alertmanagerConfigMatcherStrategy: + description: The AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig + objects match the alerts. In the future more options may be added. + properties: + type: + default: OnNamespace + description: If set to `OnNamespace`, the operator injects a label + matcher matching the namespace of the AlertmanagerConfig object + for all its routes and inhibition rules. `None` will not add + any additional matchers other than the ones specified in the + AlertmanagerConfig. Default is `OnNamespace`. + enum: + - OnNamespace + - None + type: string + type: object + alertmanagerConfigNamespaceSelector: + description: Namespaces to be selected for AlertmanagerConfig discovery. 
+ If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + alertmanagerConfigSelector: + description: AlertmanagerConfigs to be selected for to merge and configure + Alertmanager with. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + alertmanagerConfiguration: + description: 'EXPERIMENTAL: alertmanagerConfiguration specifies the + configuration of Alertmanager. If defined, it takes precedence over + the `configSecret` field. This field may change in future releases.' + properties: + global: + description: Defines the global parameters of the Alertmanager + configuration. + properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the + client. This is mutually exclusive with BasicAuth and + is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace + that contains the credentials for authentication. 
+ properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The + value is case-insensitive. \n \"Basic\" is not a + supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, BasicAuth + takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret + containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret + containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. The + secret needs to be in the same namespace as the Alertmanager + object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client + should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a + token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret + or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a + Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP + parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used + for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch + the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying + server certificates. + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing + client-authentication. + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file + for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + opsGenieApiKey: + description: The default OpsGenie API Key. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + opsGenieApiUrl: + description: The default OpsGenie API URL. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + pagerdutyUrl: + description: The default Pagerduty URL. + type: string + resolveTimeout: + description: ResolveTimeout is the default value used by alertmanager + if the alert does not include EndsAt, after this time passes + it can declare the alert as resolved if it has not been + updated. This has no impact on alerts from Prometheus, as + they always include EndsAt. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + slackApiUrl: + description: The default Slack API URL. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + smtp: + description: Configures global SMTP parameters. + properties: + authIdentity: + description: SMTP Auth using PLAIN + type: string + authPassword: + description: SMTP Auth using LOGIN and PLAIN. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authSecret: + description: SMTP Auth using CRAM-MD5. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authUsername: + description: SMTP Auth using CRAM-MD5, LOGIN and PLAIN. + If empty, Alertmanager doesn't authenticate to the SMTP + server. + type: string + from: + description: The default SMTP From header field. + type: string + hello: + description: The default hostname to identify to the SMTP + server. + type: string + requireTLS: + description: The default SMTP TLS requirement. Note that + Go does not support unencrypted connections to remote + SMTP endpoints. + type: boolean + smartHost: + description: The default SMTP smarthost used for sending + emails. + properties: + host: + description: Defines the host's address, it can be + a DNS name or a literal IP address. + minLength: 1 + type: string + port: + description: Defines the host's port, it can be a + literal port number or a port name. + minLength: 1 + type: string + required: + - host + - port + type: object + type: object + type: object + name: + description: The name of the AlertmanagerConfig resource which + is used to generate the Alertmanager configuration. It must + be defined in the same namespace as the Alertmanager object. + The operator will not enforce a `namespace` label for routes + and inhibition rules. + minLength: 1 + type: string + templates: + description: Custom notification templates. + items: + description: SecretOrConfigMap allows to specify data as a Secret + or ConfigMap. Fields are mutually exclusive. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + type: object + automountServiceAccountToken: + description: 'AutomountServiceAccountToken indicates whether a service + account token should be automatically mounted in the pod. If the + service account has `automountServiceAccountToken: true`, set the + field to `false` to opt out of automounting API credentials.' + type: boolean + baseImage: + description: 'Base image that is used to deploy pods, without tag. + Deprecated: use ''image'' instead.' + type: string + clusterAdvertiseAddress: + description: 'ClusterAdvertiseAddress is the explicit address to advertise + in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. + [1] RFC1918: https://tools.ietf.org/html/rfc1918' + type: string + clusterGossipInterval: + description: Interval between gossip attempts. + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + clusterLabel: + description: Defines the identifier that uniquely identifies the Alertmanager + cluster. You should only set it when the Alertmanager cluster includes + Alertmanager instances which are external to this Alertmanager resource. + In practice, the addresses of the external instances are provided + via the `.spec.additionalPeers` field. + type: string + clusterPeerTimeout: + description: Timeout for cluster peering. + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + clusterPushpullInterval: + description: Interval between pushpull attempts. + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Alertmanager object, which shall be mounted into the Alertmanager + Pods. Each ConfigMap is added to the StatefulSet definition as a + volume named `configmap-`. The ConfigMaps are mounted + into `/etc/alertmanager/configmaps/` in the 'alertmanager' + container. + items: + type: string + type: array + configSecret: + description: "ConfigSecret is the name of a Kubernetes Secret in the + same namespace as the Alertmanager object, which contains the configuration + for this Alertmanager instance. If empty, it defaults to `alertmanager-`. + \n The Alertmanager configuration should be available under the + `alertmanager.yaml` key. Additional keys from the original secret + are copied to the generated secret and mounted into the `/etc/alertmanager/config` + directory in the `alertmanager` container. \n If either the secret + or the `alertmanager.yaml` key is missing, the operator provisions + a minimal Alertmanager configuration with one empty receiver (effectively + dropping alert notifications)." + type: string + containers: + description: 'Containers allows injecting additional containers. 
This + is meant to allow adding an authentication proxy to an Alertmanager + pod. Containers described here modify an operator generated container + if they share the same name and modifications are done via a strategic + merge patch. The current container names are: `alertmanager` and + `config-reloader`. Overriding containers is entirely outside the + scope of what the maintainers will support and by doing so, you + accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. 
Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. 
More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. 
Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. 
The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. 
If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + externalUrl: + description: The external URL the Alertmanager instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Alertmanager is not served from root of a DNS name. + type: string + forceEnableClusterMode: + description: ForceEnableClusterMode ensures Alertmanager does not + deactivate the cluster mode when running with a single replica. + Use case is e.g. spanning an Alertmanager cluster across Kubernetes + clusters with a single replica in each. + type: boolean + hostAliases: + description: Pods' hostAliases configuration + items: + description: HostAlias holds the mapping between IP and hostnames + that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + required: + - hostnames + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + image: + description: Image if specified has precedence over baseImage, tag + and sha combinations. Specifying the version is still necessary + to ensure the Prometheus Operator knows what version of Alertmanager + is being configured. + type: string + imagePullPolicy: + description: Image pull policy for the 'alertmanager', 'init-config-reloader' + and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same + namespace to use for pulling prometheus and alertmanager images + from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Alertmanager configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart + of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + InitContainers described here modify an operator generated init + containers if they share the same name and modifications are done + via a strategic merge patch. The current init container name is: + `init-config-reloader`. Overriding init containers is entirely outside + the scope of what the maintainers will support and by doing so, + you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. 
Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. 
+ Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
+ properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. 
spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. 
Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. 
May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' 
+ type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Alertmanager server listen on loopback, + so that it does not bind against the Pod IP. Note this is only for + the Alertmanager UI, not the gossip communication. + type: boolean + logFormat: + description: Log format for Alertmanager to be configured with. + enum: + - "" + - logfmt + - json + type: string + logLevel: + description: Log level for Alertmanager to be configured with. + enum: + - "" + - debug + - info + - warn + - error + type: string + minReadySeconds: + description: Minimum number of seconds for which a newly created pod + should be ready without any of its container crashing for it to + be considered available. 
Defaults to 0 (pod will be considered available + as soon as it is ready) This is an alpha field from kubernetes 1.22 + until 1.24 which requires enabling the StatefulSetMinReadySeconds + feature gate. + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + paused: + description: If set to true all actions on the underlying managed + objects are not goint to be performed, except for delete actions. + type: boolean + podMetadata: + description: "PodMetadata configures labels and annotations which + are propagated to the Alertmanager pods. \n The following items + are reserved and cannot be overridden: * \"alertmanager\" label, + set to the name of the Alertmanager instance. * \"app.kubernetes.io/instance\" + label, set to the name of the Alertmanager instance. * \"app.kubernetes.io/managed-by\" + label, set to \"prometheus-operator\". * \"app.kubernetes.io/name\" + label, set to \"alertmanager\". * \"app.kubernetes.io/version\" + label, set to the Alertmanager version. * \"kubectl.kubernetes.io/default-container\" + annotation, set to \"alertmanager\"." + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow a + client to request the generation of an appropriate name automatically. + Name is primarily intended for creation idempotence and configuration + definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + portName: + default: web + description: Port name used for the pods and governing service. Defaults + to `web`. + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + replicas: + description: Size is the expected size of the alertmanager cluster. + The controller will eventually make the size of the running cluster + equal to the expected size. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + retention: + default: 120h + description: Time duration Alertmanager shall retain data for. Default + is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` + (milliseconds seconds minutes hours). + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + routePrefix: + description: The route prefix Alertmanager registers HTTP handlers + for. This is useful, if using ExternalURL and a proxy is rewriting + HTTP routes of a request, and the actual ExternalURL is still true, + but the server serves requests under a different route prefix. For + example for use with `kubectl proxy`. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as + the Alertmanager object, which shall be mounted into the Alertmanager + Pods. Each Secret is added to the StatefulSet definition as a volume + named `secret-`. The Secrets are mounted into `/etc/alertmanager/secrets/` + in the 'alertmanager' container. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' 
+ type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must be set if type is "Localhost". Must NOT be + set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. 
Note that this field cannot + be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's containers + must have the same effective HostProcess value (it is not + allowed to have a mix of HostProcess containers and non-HostProcess + containers). In addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount + to use to run the Prometheus Pods. + type: string + sha: + description: 'SHA of Alertmanager container image to be deployed. + Defaults to the value of `version`. Similar to a tag, but the SHA + explicitly deploys an immutable container image. Version and Tag + are ignored if SHA is set. Deprecated: use ''image'' instead. The + image digest can be specified as part of the image URL.' + type: string + storage: + description: Storage is the definition of how storage will be used + by the Alertmanager instances. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be removed in a future + release.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the StatefulSet. + If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: 'EphemeralVolumeSource to be used by the StatefulSet. + This is a beta field in k8s 1.21 and GA in 1.15. For lower versions, + starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC will + be deleted together with the pod. The name of the PVC will + be `-` where `` is the + name from the `PodSpec.Volumes` array entry. Pod validation + will reject the pod if the concatenated name is not valid + for a PVC (for example, too long). \n An existing PVC with + that name that is not owned by the pod will *not* be used + for the pod to avoid using an unrelated volume by mistake. + Starting the pod is then blocked until the unrelated PVC + is removed. If such a pre-created PVC is meant to be used + by the pod, the PVC has to updated with an owner reference + to the pod once the pod exists. Normally this should not + be necessary, but it may be useful when manually reconstructing + a broken cluster. \n This field is read-only and no changes + will be made by Kubernetes to the PVC after it has been + created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will + be copied into the PVC when creating it. No other fields + are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified data + source. When the AnyVolumeDataSource feature gate + is enabled, dataSource contents will be copied to + dataSourceRef, and dataSourceRef contents will be + copied to dataSource when dataSourceRef.namespace + is not specified. 
If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from + which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a + non-empty API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the dataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource and dataSourceRef) + will be set to the same value automatically if one + of them is empty and the other is non-empty. When + namespace is specified in dataSourceRef, dataSource + isn''t set to the same value and must be empty. + There are three important differences between dataSource + and dataSourceRef: * While dataSource only allows + two specific types of objects, dataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is + specified. * While dataSource only allows local + objects, dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the namespace + field of dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It + can only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is + implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + volumeClaimTemplate: + description: Defines the PVC spec to be used by the Prometheus + StatefulSets. The easiest way to use a volume that cannot be + automatically provisioned is to use a label selector alongside + manually created PersistentVolumes. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST + resource this object represents. Servers may infer this + from the endpoint the client submits requests to. Cannot + be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to + an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value + map stored with a resource that may be set by external + tools to store and retrieve arbitrary metadata. They + are not queryable and should be preserved when modifying + objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. + Is required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be + updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Defines the desired characteristics of a volume + requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on the + contents of the specified data source. 
When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be + copied to dataSourceRef, and dataSourceRef contents + will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from + which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic provisioner. + This field will replace the functionality of the dataSource + field and as such if both fields are non-empty, they + must have the same value. For backwards compatibility, + when namespace isn''t specified in dataSourceRef, both + fields (dataSource and dataSourceRef) will be set to + the same value automatically if one of them is empty + and the other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the same + value and must be empty. There are three important differences + between dataSource and dataSourceRef: * While dataSource + only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource feature gate + to be enabled. (Alpha) Using the namespace field of + dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. 
If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the status + field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used by + this container. \n This is an alpha field and requires + enabling the DynamicResourceAllocation feature gate. + \n This field is immutable. It can only be set for + containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Deprecated: this field is never set.' + properties: + accessModes: + description: 'accessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResourceStatuses: + additionalProperties: + description: When a controller receives persistentvolume + claim update with ClaimResourceStatus for a resource + that it does not recognizes, then it should ignore + that update and let other controllers handle it. + type: string + description: "allocatedResourceStatuses stores status + of resource being resized for the given PVC. Key names + follow standard Kubernetes label syntax. Valid values + are either: * Un-prefixed keys: - storage - the capacity + of the volume. * Custom resources must use implementation-defined + prefixed names such as \"example.com/my-custom-resource\" + Apart from above values - keys that are unprefixed or + have kubernetes.io prefix are considered reserved and + hence may not be used. \n ClaimResourceStatus can be + in any of following states: - ControllerResizeInProgress: + State set when resize controller starts resizing the + volume in control-plane. - ControllerResizeFailed: State + set when resize has failed in resize controller with + a terminal error. - NodeResizePending: State set when + resize controller has finished resizing the volume but + further resizing of volume is needed on the node. - + NodeResizeInProgress: State set when kubelet starts + resizing the volume. - NodeResizeFailed: State set when + resizing has failed in kubelet with a terminal error. + Transient errors don't set NodeResizeFailed. For example: + if expanding a PVC for more capacity - this field can + be one of the following states: - pvc.status.allocatedResourceStatus['storage'] + = \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] + = \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] + = \"NodeResizeFailed\" When this field is not set, it + means that no resize operation is in progress for the + given PVC. \n A controller that receives PVC update + with previously unknown resourceName or ClaimResourceStatus + should ignore the update for the purpose it was designed. + For example - a controller that only is responsible + for resizing capacity of the volume, should ignore PVC + updates that change other valid resources associated + with PVC. \n This is an alpha field and requires enabling + RecoverVolumeExpansionFailure feature." 
+ type: object + x-kubernetes-map-type: granular + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: "allocatedResources tracks the resources + allocated to a PVC including its capacity. Key names + follow standard Kubernetes label syntax. Valid values + are either: * Un-prefixed keys: - storage - the capacity + of the volume. * Custom resources must use implementation-defined + prefixed names such as \"example.com/my-custom-resource\" + Apart from above values - keys that are unprefixed or + have kubernetes.io prefix are considered reserved and + hence may not be used. \n Capacity reported here may + be larger than the actual capacity when a volume expansion + operation is requested. For storage quota, the larger + value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower + than the requested capacity. \n A controller that receives + PVC update with previously unknown resourceName should + ignore the update for the purpose it was designed. For + example - a controller that only is responsible for + resizing capacity of the volume, should ignore PVC updates + that change other valid resources associated with PVC. + \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure + feature." + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: capacity represents the actual resources + of the underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being + resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains + details about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed + the condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the + condition transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message + indicating details about last transition. + type: string + reason: + description: reason is a unique, this should be + a short, machine understandable string that gives + the reason for condition's last transition. If + it reports "ResizeStarted" that means the underlying + persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType + is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tag: + description: 'Tag of Alertmanager container image to be deployed. + Defaults to the value of `version`. 
Version is ignored if Tag is + set. Deprecated: use ''image'' instead. The image tag can be specified + as part of the image URL.' + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select + the pods over which spreading will be calculated. The keys + are used to lookup values from the incoming pod labels, those + key-value labels are ANDed with labelSelector to select the + group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in + both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot + be set when LabelSelector isn't set. Keys that don't exist + in the incoming pod labels will be ignored. A null or empty + list means only match against labelSelector. \n This is a + beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods + in an eligible domain or zero if the number of eligible domains + is less than MinDomains. For example, in a 3-zone cluster, + MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. | + zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 to become + 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation of + Skew is performed. And when the number of eligible domains + with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, scheduler + won't schedule more than maxSkew Pods to those domains. If + value is nil, the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. When value + is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For + example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains + is set to 5 and pods with the same labelSelector spread as + 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), so \"global + minimum\" is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because computed + skew will be 3(3 - 0) if new Pod is scheduled to any of the + three zones, it will violate MaxSkew. \n This is a beta field + and requires the MinDomainsInPodTopologySpread feature gate + to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. \n + If this value is nil, the behavior is equivalent to the Honor + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node + taints when calculating pod topology spread skew. Options + are: - Honor: nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + \n If this value is nil, the behavior is equivalent to the + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each + as a "bucket", and try to put balanced number of pods into + each bucket. We define a domain as a particular instance of + a topology. Also, we define an eligible domain as a domain + whose nodes meet the requirements of nodeAffinityPolicy and + nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain of + that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for an + incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + version: + description: Version the cluster should be on. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified will + be appended to other VolumeMounts in the alertmanager container, + that are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on + the output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). 
defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. 
This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + tracking are needed, c) the storage driver is specified through + a storage class, and d) the storage driver supports dynamic + volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource + for more information on the connection between this volume + type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that persist + for longer than the lifecycle of an individual pod. \n Use + CSI for light-weight local ephemeral volumes if the CSI driver + is meant to be used that way - see the documentation of the + driver for more information. \n A pod can use both types of + ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. 
Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. 
Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' 
+ properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. 
YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. 
+ type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + web: + description: Defines the web command line flags when starting Alertmanager. + properties: + getConcurrency: + description: Maximum number of GET requests processed concurrently. + This corresponds to the Alertmanager's `--web.get-concurrency` + flag. + format: int32 + type: integer + httpConfig: + description: Defines HTTP parameters for web server. + properties: + headers: + description: List of headers that can be added to HTTP responses. + properties: + contentSecurityPolicy: + description: Set the Content-Security-Policy header to + HTTP responses. Unset if blank. + type: string + strictTransportSecurity: + description: Set the Strict-Transport-Security header + to HTTP responses. Unset if blank. Please make sure + that you use this with care as this header might force + browsers to load Prometheus and the other applications + hosted on the same domain and subdomains over HTTPS. + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + type: string + xContentTypeOptions: + description: Set the X-Content-Type-Options header to + HTTP responses. Unset if blank. Accepted value is nosniff. + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options + enum: + - "" + - NoSniff + type: string + xFrameOptions: + description: Set the X-Frame-Options header to HTTP responses. + Unset if blank. Accepted values are deny and sameorigin. + https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options + enum: + - "" + - Deny + - SameOrigin + type: string + xXSSProtection: + description: Set the X-XSS-Protection header to all responses. + Unset if blank. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection + type: string + type: object + http2: + description: Enable HTTP/2 support. 
Note that HTTP/2 is only + supported with TLS. When TLSConfig is not configured, HTTP/2 + will be disabled. Whenever the value of the field changes, + a rolling update will be triggered. + type: boolean + type: object + timeout: + description: Timeout for HTTP requests. This corresponds to the + Alertmanager's `--web.timeout` flag. + format: int32 + type: integer + tlsConfig: + description: Defines the TLS parameters for HTTPS. + properties: + cert: + description: Contains the TLS certificate for the server. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cipherSuites: + description: 'List of supported cipher suites for TLS versions + up to TLS 1.2. If empty, Go default cipher suites are used. + Available cipher suites are documented in the go documentation: + https://golang.org/pkg/crypto/tls/#pkg-constants' + items: + type: string + type: array + client_ca: + description: Contains the CA certificate for client certificate + authentication to the server. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientAuthType: + description: 'Server policy for client authentication. Maps + to ClientAuth Policies. For more detail on clientAuth options: + https://golang.org/pkg/crypto/tls/#ClientAuthType' + type: string + curvePreferences: + description: 'Elliptic curves that will be used in an ECDHE + handshake, in preference order. 
Available curves are documented + in the go documentation: https://golang.org/pkg/crypto/tls/#CurveID' + items: + type: string + type: array + keySecret: + description: Secret containing the TLS key for the server. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + description: Maximum TLS version that is acceptable. Defaults + to TLS13. + type: string + minVersion: + description: Minimum TLS version that is acceptable. Defaults + to TLS12. + type: string + preferServerCipherSuites: + description: Controls whether the server selects the client's + most preferred cipher suite, or the server's most preferred + cipher suite. If true then the server's preference, as expressed + in the order of elements in cipherSuites, is used. + type: boolean + required: + - cert + - keySecret + type: object + type: object + type: object + status: + description: 'Most recent observed status of the Alertmanager cluster. + Read-only. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Alertmanager cluster. + format: int32 + type: integer + conditions: + description: The current state of the Alertmanager object. + items: + description: Condition represents the state of the resources associated + with the Prometheus, Alertmanager or ThanosRuler resource. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status property. + format: date-time + type: string + message: + description: Human-readable message indicating details for the + condition's last transition. + type: string + observedGeneration: + description: ObservedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if `.metadata.generation` + is currently 12, but the `.status.conditions[].observedGeneration` + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + type: integer + reason: + description: Reason for the condition's last transition. + type: string + status: + description: Status of the condition. + type: string + type: + description: Type of the condition being reported. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + paused: + description: Represents whether any actions on the underlying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this + Alertmanager object (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Alertmanager + object. 
+ format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Alertmanager object that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-podmonitors.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-podmonitors.yaml new file mode 100644 index 000000000..4b4a8c3b2 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-podmonitors.yaml @@ -0,0 +1,720 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.2/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.71.2 + name: podmonitors.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PodMonitor + listKind: PodMonitorList + plural: podmonitors + shortNames: + - pmon + singular: podmonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PodMonitor defines monitoring for a set of pods. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Pod selection for target discovery + by Prometheus. + properties: + attachMetadata: + description: "`attachMetadata` defines additional metadata which is + added to the discovered targets. \n It requires Prometheus >= v2.37.0." + properties: + node: + description: When set to true, Prometheus must have the `get` + permission on the `Nodes` objects. + type: boolean + type: object + jobLabel: + description: "The label to use to retrieve the job name from. `jobLabel` + selects the label from the associated Kubernetes `Pod` object which + will be used as the `job` label for all metrics. \n For example + if `jobLabel` is set to `foo` and the Kubernetes `Pod` object is + labeled with `foo: bar`, then Prometheus adds the `job=\"bar\"` + label to all ingested metrics. \n If the value of this field is + empty, the `job` label of the metrics defaults to the namespace + and name of the PodMonitor object (e.g. `/`)." + type: string + keepDroppedTargets: + description: "Per-scrape limit on the number of targets dropped by + relabeling that will be kept in memory. 0 means no limit. \n It + requires Prometheus >= v2.47.0." 
+ format: int64 + type: integer + labelLimit: + description: "Per-scrape limit on number of labels that will be accepted + for a sample. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + labelNameLengthLimit: + description: "Per-scrape limit on length of labels name that will + be accepted for a sample. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + labelValueLengthLimit: + description: "Per-scrape limit on length of labels value that will + be accepted for a sample. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + namespaceSelector: + description: Selector to select which namespaces the Kubernetes `Pods` + objects are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names to select from. + items: + type: string + type: array + type: object + podMetricsEndpoints: + description: List of endpoints part of this PodMonitor. + items: + description: PodMetricsEndpoint defines an endpoint serving Prometheus + metrics to be scraped by Prometheus. + properties: + authorization: + description: "`authorization` configures the Authorization header + credentials to use when scraping the target. \n Cannot be + set at the same time as `basicAuth`, or `oauth2`." + properties: + credentials: + description: Selects a key of a Secret in the namespace + that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value + is case-insensitive. \n \"Basic\" is not a supported value. + \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: "`basicAuth` configures the Basic Authentication + credentials to use when scraping the target. \n Cannot be + set at the same time as `authorization`, or `oauth2`." + properties: + password: + description: '`password` specifies a key of a Secret containing + the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing + the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: "`bearerTokenSecret` specifies a key of a Secret + containing the bearer token for scraping targets. The secret + needs to be in the same namespace as the PodMonitor object + and readable by the Prometheus Operator. \n Deprecated: use + `authorization` instead." + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + description: '`enableHttp2` can be used to disable HTTP2 when + scraping the target.' + type: boolean + filterRunning: + description: "When true, the pods which are not running (e.g. + either in Failed or Succeeded state) are dropped during the + target discovery. \n If unset, the filtering is enabled. \n + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase" + type: boolean + followRedirects: + description: '`followRedirects` defines whether the scrape requests + should follow HTTP 3xx redirects.' + type: boolean + honorLabels: + description: When true, `honorLabels` preserves the metric's + labels when they collide with the target's labels. + type: boolean + honorTimestamps: + description: '`honorTimestamps` controls whether Prometheus + preserves the timestamps when exposed by the target.' + type: boolean + interval: + description: "Interval at which Prometheus scrapes the metrics + from the target. \n If empty, Prometheus uses the global scrape + interval." + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + description: '`metricRelabelings` configures the relabeling + rules to apply to the samples before ingestion.' + items: + description: "RelabelConfig allows dynamic rewriting of the + label set for targets, alerts, scraped samples and remote + write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. + \n `Uppercase` and `Lowercase` actions require Prometheus + >= v2.36.0. `DropEqual` and `KeepEqual` actions require + Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source + label values. \n Only applicable when the action is + `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: "Replacement value against which a Replace + action is performed if the regular expression matches. + \n Regex capture groups are available." 
+ type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + Separator and matched against the configured regular + expression. + items: + description: LabelName is a valid Prometheus label name + which may only contain ASCII letters, numbers, as + well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written + in a replacement. \n It is mandatory for `Replace`, + `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and + `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + oauth2: + description: "`oauth2` configures the OAuth2 settings to use + when scraping the target. \n It requires Prometheus >= 2.27.0. + \n Cannot be set at the same time as `authorization`, or `basicAuth`." + properties: + clientId: + description: '`clientId` specifies a key of a Secret or + ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret + containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters + to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for + the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the + token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + description: '`params` define optional HTTP URL parameters.' 
+ type: object + path: + description: "HTTP path from which to scrape for metrics. \n + If empty, Prometheus uses the default value (e.g. `/metrics`)." + type: string + port: + description: "Name of the Pod port which this endpoint refers + to. \n It takes precedence over `targetPort`." + type: string + proxyUrl: + description: '`proxyURL` configures the HTTP Proxy URL (e.g. + "http://proxyserver:2195") to go through when scraping the + target.' + type: string + relabelings: + description: "`relabelings` configures the relabeling rules + to apply the target's metadata labels. \n The Operator automatically + adds relabelings for a few standard Kubernetes fields. \n + The original scrape job's name is available via the `__tmp_prometheus_job_name` + label. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + items: + description: "RelabelConfig allows dynamic rewriting of the + label set for targets, alerts, scraped samples and remote + write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. + \n `Uppercase` and `Lowercase` actions require Prometheus + >= v2.36.0. `DropEqual` and `KeepEqual` actions require + Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source + label values. \n Only applicable when the action is + `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: "Replacement value against which a Replace + action is performed if the regular expression matches. + \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + Separator and matched against the configured regular + expression. + items: + description: LabelName is a valid Prometheus label name + which may only contain ASCII letters, numbers, as + well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written + in a replacement. \n It is mandatory for `Replace`, + `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and + `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + scheme: + description: "HTTP scheme to use for scraping. \n `http` and + `https` are the expected values unless you rewrite the `__scheme__` + label via relabeling. \n If empty, Prometheus uses the default + value `http`." + enum: + - http + - https + type: string + scrapeTimeout: + description: "Timeout after which Prometheus considers the scrape + to be failed. \n If empty, Prometheus uses the global scrape + timeout unless it is less than the target's scrape interval + value in which the latter is used." 
+ pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: "Name or number of the target port of the `Pod` + object behind the Service, the port must be specified with + container port property. \n Deprecated: use 'port' instead." + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the target. + properties: + ca: + description: Certificate authority used when verifying server + certificates. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + trackTimestampsStaleness: + description: "`trackTimestampsStaleness` defines whether Prometheus + tracks staleness of the metrics that have an explicit timestamp + present in scraped data. Has no effect if `honorTimestamps` + is false. \n It requires Prometheus >= v2.48.0." + type: boolean + type: object + type: array + podTargetLabels: + description: '`podTargetLabels` defines the labels which are transferred + from the associated Kubernetes `Pod` object onto the ingested metrics.' + items: + type: string + type: array + sampleLimit: + description: '`sampleLimit` defines a per-scrape limit on the number + of scraped samples that will be accepted.' + format: int64 + type: integer + selector: + description: Label selector to select the Kubernetes `Pod` objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + targetLimit: + description: '`targetLimit` defines a limit on the number of scraped + targets that will be accepted.' 
+ format: int64 + type: integer + required: + - selector + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-probes.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-probes.yaml new file mode 100644 index 000000000..54b141804 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-probes.yaml @@ -0,0 +1,737 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.2/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.71.2 + name: probes.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Probe + listKind: ProbeList + plural: probes + shortNames: + - prb + singular: probe + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Probe defines monitoring for a set of static targets or ingresses. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Ingress selection for target discovery + by Prometheus. + properties: + authorization: + description: Authorization section for this endpoint + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains + the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. + \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over basic + authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' + properties: + password: + description: '`password` specifies a key of a Secret containing + the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing + the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping targets. + The secret needs to be in the same namespace as the probe and accessible + by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + interval: + description: Interval at which targets are probed using the configured + prober. If not specified Prometheus' global scrape interval is used. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + jobName: + description: The job name assigned to scraped metrics by default. + type: string + keepDroppedTargets: + description: "Per-scrape limit on the number of targets dropped by + relabeling that will be kept in memory. 0 means no limit. \n It + requires Prometheus >= v2.47.0." + format: int64 + type: integer + labelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: "RelabelConfig allows dynamic rewriting of the label + set for targets, alerts, scraped samples and remote write samples. + \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. + \n `Uppercase` and `Lowercase` actions require Prometheus + >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus + >= v2.41.0. 
\n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source label + values. \n Only applicable when the action is `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: "Replacement value against which a Replace action + is performed if the regular expression matches. \n Regex capture + groups are available." + type: string + separator: + description: Separator is the string between concatenated SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing labels. + Their content is concatenated using the configured Separator + and matched against the configured regular expression. + items: + description: LabelName is a valid Prometheus label name which + may only contain ASCII letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written + in a replacement. \n It is mandatory for `Replace`, `HashMod`, + `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. + \n Regex capture groups are available." + type: string + type: object + type: array + module: + description: 'The module to use for probing specifying how to probe + the target. Example module configuring in the blackbox exporter: + https://github.com/prometheus/blackbox_exporter/blob/master/example.yml' + type: string + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap + containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing + the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters + to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the + token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token + from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + prober: + description: Specification for the prober to use for probing targets. + The prober.URL parameter is required. Targets cannot be probed if + left empty. + properties: + path: + default: /probe + description: Path to collect metrics from. Defaults to `/probe`. + type: string + proxyUrl: + description: Optional ProxyURL. + type: string + scheme: + description: HTTP scheme to use for scraping. `http` and `https` + are the expected values unless you rewrite the `__scheme__` + label via relabeling. If empty, Prometheus uses the default + value `http`. + enum: + - http + - https + type: string + url: + description: Mandatory URL of the prober. + type: string + required: + - url + type: object + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + scrapeTimeout: + description: Timeout for scraping metrics from the Prometheus exporter. + If not specified, the Prometheus global scrape timeout is used. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetLimit: + description: TargetLimit defines a limit on the number of scraped + targets that will be accepted. + format: int64 + type: integer + targets: + description: Targets defines a set of static or dynamically discovered + targets to probe. + properties: + ingress: + description: ingress defines the Ingress objects to probe and + the relabeling configuration. If `staticConfig` is also defined, + `staticConfig` takes precedence. + properties: + namespaceSelector: + description: From which namespaces to select Ingress objects. + properties: + any: + description: Boolean describing whether all namespaces + are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names to select from. + items: + type: string + type: array + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to the label set of + the target before it gets scraped. The original ingress + address is available via the `__tmp_prometheus_ingress_address` + label. It can be used to customize the probed URL. The original + scrape job''s name is available via the `__tmp_prometheus_job_name` + label. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: "RelabelConfig allows dynamic rewriting of + the label set for targets, alerts, scraped samples and + remote write samples. 
\n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. + \n `Uppercase` and `Lowercase` actions require Prometheus + >= v2.36.0. `DropEqual` and `KeepEqual` actions require + Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source + label values. \n Only applicable when the action is + `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: "Replacement value against which a Replace + action is performed if the regular expression matches. + \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + Separator and matched against the configured regular + expression. + items: + description: LabelName is a valid Prometheus label + name which may only contain ASCII letters, numbers, + as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is + written in a replacement. \n It is mandatory for `Replace`, + `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and + `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + selector: + description: Selector to select the Ingress objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + staticConfig: + description: 'staticConfig defines the static list of targets + to probe and the relabeling configuration. If `ingress` is also + defined, `staticConfig` takes precedence. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.' + properties: + labels: + additionalProperties: + type: string + description: Labels assigned to all metrics scraped from the + targets. + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to the label set of + the targets before it gets scraped. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: "RelabelConfig allows dynamic rewriting of + the label set for targets, alerts, scraped samples and + remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. + \n `Uppercase` and `Lowercase` actions require Prometheus + >= v2.36.0. `DropEqual` and `KeepEqual` actions require + Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source + label values. \n Only applicable when the action is + `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: "Replacement value against which a Replace + action is performed if the regular expression matches. + \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + Separator and matched against the configured regular + expression. + items: + description: LabelName is a valid Prometheus label + name which may only contain ASCII letters, numbers, + as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is + written in a replacement. \n It is mandatory for `Replace`, + `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and + `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + static: + description: The list of hosts to probe. + items: + type: string + type: array + type: object + type: object + tlsConfig: + description: TLS configuration to use when scraping the endpoint. + properties: + ca: + description: Certificate authority used when verifying server + certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusagents.yaml b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusagents.yaml new file mode 100644 index 000000000..d573b95d1 --- /dev/null +++ b/deployment/helm/milvus/charts/pulsar/charts/kube-prometheus-stack/charts/crds/crds/crd-prometheusagents.yaml @@ -0,0 +1,8547 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.2/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.71.2 + name: prometheusagents.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PrometheusAgent + listKind: PrometheusAgentList + plural: prometheusagents + shortNames: + - promagent + singular: prometheusagent + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Prometheus agent + jsonPath: .spec.version + name: Version + type: string + - description: The number of desired replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: The number of ready replicas + jsonPath: .status.availableReplicas + name: Ready + type: integer + - jsonPath: .status.conditions[?(@.type == 'Reconciled')].status + name: Reconciled + type: string + - jsonPath: .status.conditions[?(@.type == 'Available')].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Whether the resource reconciliation is paused or not + jsonPath: .status.paused + name: Paused + priority: 1 + type: boolean + name: v1alpha1 + schema: + openAPIV3Schema: + description: PrometheusAgent defines a Prometheus agent deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Prometheus + agent. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalArgs: + description: "AdditionalArgs allows setting additional arguments for + the 'prometheus' container. \n It is intended for e.g. activating + hidden flags which are not supported by the dedicated configuration + options yet. The arguments are passed as-is to the Prometheus container + which may cause issues if they are invalid or not supported by the + given Prometheus version. \n In case of an argument conflict (e.g. 
+ an argument which is already set by the operator itself) or when + providing an invalid argument, the reconciliation will fail and + an error will be logged." + items: + description: Argument as part of the AdditionalArgs list. + properties: + name: + description: Name of the argument, e.g. "scrape.discovery-reload-interval". + minLength: 1 + type: string + value: + description: Argument value, e.g. 30s. Can be empty for name-only + arguments (e.g. --storage.tsdb.no-lockfile) + type: string + required: + - name + type: object + type: array + additionalScrapeConfigs: + description: 'AdditionalScrapeConfigs allows specifying a key of a + Secret containing additional Prometheus scrape configurations. Scrape + configurations specified are appended to the configurations generated + by the Prometheus Operator. Job configurations specified must have + the form as specified in the official Prometheus documentation: + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. + As scrape configs are appended, the user is responsible to make + sure it is valid. Note that using this feature may expose the possibility + to break upgrades of Prometheus. It is advised to review Prometheus + release notes to ensure that no incompatible scrape configs are + going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + affinity: + description: Defines the Pods' affinity scheduling rules if specified. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. 
+ type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. 
+ items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". 
+ The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+                                  type: object
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            namespaces:
+                              description: namespaces specifies a static list of namespace
+                                names that the term applies to. The term is applied
+                                to the union of the namespaces listed in this field
+                                and the ones selected by namespaceSelector. null or
+                                empty namespaces list and null namespaceSelector means
+                                "this pod's namespace".
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity)
+                                or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where
+                                co-located is defined as running on a node whose value
+                                of the label with key topologyKey matches that of
+                                any node on which any of the selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                type: object
+              apiserverConfig:
+                description: 'APIServerConfig allows specifying a host and auth methods
+                  to access the Kubernetes API server. If null, Prometheus is assumed
+                  to run inside of the cluster: it will discover the API servers automatically
+                  and use the Pod''s CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.'
+                properties:
+                  authorization:
+                    description: "Authorization section for the API server. \n Cannot
+                      be set at the same time as `basicAuth`, `bearerToken`, or `bearerTokenFile`."
+                    properties:
+                      credentials:
+                        description: Selects a key of a Secret in the namespace that
+                          contains the credentials for authentication.
+                        properties:
+                          key:
+                            description: The key of the secret to select from. Must
+                              be a valid secret key.
+                            type: string
+                          name:
+                            description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              TODO: Add other useful fields. apiVersion, kind, uid?'
+                            type: string
+                          optional:
+                            description: Specify whether the Secret or its key must
+                              be defined
+                            type: boolean
+                        required:
+                        - key
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      credentialsFile:
+                        description: File to read a secret from, mutually exclusive
+                          with `credentials`.
+                        type: string
+                      type:
+                        description: "Defines the authentication type. The value is
+                          case-insensitive. \n \"Basic\" is not a supported value.
+                          \n Default: \"Bearer\""
+                        type: string
+                    type: object
+                  basicAuth:
+                    description: "BasicAuth configuration for the API server. \n Cannot
+                      be set at the same time as `authorization`, `bearerToken`, or
+                      `bearerTokenFile`."
+                    properties:
+                      password:
+                        description: '`password` specifies a key of a Secret containing
+                          the password for authentication.'
+                        properties:
+                          key:
+                            description: The key of the secret to select from. Must
+                              be a valid secret key.
+                            type: string
+                          name:
+                            description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              TODO: Add other useful fields. apiVersion, kind, uid?'
+                            type: string
+                          optional:
+                            description: Specify whether the Secret or its key must
+                              be defined
+                            type: boolean
+                        required:
+                        - key
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      username:
+                        description: '`username` specifies a key of a Secret containing
+                          the username for authentication.'
+                        properties:
+                          key:
+                            description: The key of the secret to select from. Must
+                              be a valid secret key.
+                            type: string
+                          name:
+                            description: 'Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerToken: + description: "*Warning: this field shouldn't be used because the + token value appears in clear-text. Prefer using `authorization`.* + \n Deprecated: this will be removed in a future release." + type: string + bearerTokenFile: + description: "File to read bearer token for accessing apiserver. + \n Cannot be set at the same time as `basicAuth`, `authorization`, + or `bearerToken`. \n Deprecated: this will be removed in a future + release. Prefer using `authorization`." + type: string + host: + description: Kubernetes API address consisting of a hostname or + IP address followed by an optional port number. + type: string + tlsConfig: + description: TLS Config to use for the API server. + properties: + ca: + description: Certificate authority used when verifying server + certificates. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - host + type: object + arbitraryFSAccessThroughSMs: + description: When true, ServiceMonitor, PodMonitor and Probe object + are forbidden to reference arbitrary files on the file system of + the 'prometheus' container. When a ServiceMonitor's endpoint specifies + a `bearerTokenFile` value (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'), + a malicious target can get access to the Prometheus service account's + token in the Prometheus' scrape request. Setting `spec.arbitraryFSAccessThroughSM` + to 'true' would prevent the attack. Users should instead provide + the credentials using the `spec.bearerTokenSecret` field. + properties: + deny: + type: boolean + type: object + bodySizeLimit: + description: BodySizeLimit defines per-scrape on response body size. + Only valid in Prometheus versions 2.45.0 and newer. + pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$ + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Prometheus object, which shall be mounted into the Prometheus + Pods. Each ConfigMap is added to the StatefulSet definition as a + volume named `configmap-`. The ConfigMaps are mounted + into /etc/prometheus/configmaps/ in the 'prometheus' + container. + items: + type: string + type: array + containers: + description: "Containers allows injecting additional containers or + modifying operator generated containers. This can be used to allow + adding an authentication proxy to the Pods or to change the behavior + of an operator generated container. Containers described here modify + an operator generated container if they share the same name and + modifications are done via a strategic merge patch. \n The names + of containers managed by the operator are: * `prometheus` * `config-reloader` + * `thanos-sidecar` \n Overriding containers is entirely outside + the scope of what the maintainers will support and by doing so, + you accept that this behaviour may break at any time without notice." + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. 
If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. 
+ Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
+ properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. 
spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. 
Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. 
May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' 
+ type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + enableFeatures: + description: "Enable access to Prometheus feature flags. By default, + no features are enabled. \n Enabling features which are disabled + by default is entirely outside the scope of what the maintainers + will support and by doing so, you accept that this behaviour may + break at any time without notice. \n For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/" + items: + type: string + type: array + enableRemoteWriteReceiver: + description: "Enable Prometheus to be used as a receiver for the Prometheus + remote write protocol. \n WARNING: This is not considered an efficient + way of ingesting samples. Use it with caution for specific low-volume + use cases. 
It is not suitable for replacing the ingestion via scraping + and turning Prometheus into a push-based metrics collection system. + For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver + \n It requires Prometheus >= v2.33.0." + type: boolean + enforcedBodySizeLimit: + description: "When defined, enforcedBodySizeLimit specifies a global + limit on the size of uncompressed response body that will be accepted + by Prometheus. Targets responding with a body larger than this many + bytes will cause the scrape to fail. \n It requires Prometheus >= + v2.28.0." + pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$ + type: string + enforcedKeepDroppedTargets: + description: "When defined, enforcedKeepDroppedTargets specifies a + global limit on the number of targets dropped by relabeling that + will be kept in memory. The value overrides any `spec.keepDroppedTargets` + set by ServiceMonitor, PodMonitor, Probe objects unless `spec.keepDroppedTargets` + is greater than zero and less than `spec.enforcedKeepDroppedTargets`. + \n It requires Prometheus >= v2.47.0." + format: int64 + type: integer + enforcedLabelLimit: + description: "When defined, enforcedLabelLimit specifies a global + limit on the number of labels per sample. The value overrides any + `spec.labelLimit` set by ServiceMonitor, PodMonitor, Probe objects + unless `spec.labelLimit` is greater than zero and less than `spec.enforcedLabelLimit`. + \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + enforcedLabelNameLengthLimit: + description: "When defined, enforcedLabelNameLengthLimit specifies + a global limit on the length of labels name per sample. The value + overrides any `spec.labelNameLengthLimit` set by ServiceMonitor, + PodMonitor, Probe objects unless `spec.labelNameLengthLimit` is + greater than zero and less than `spec.enforcedLabelNameLengthLimit`. + \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + enforcedLabelValueLengthLimit: + description: "When not null, enforcedLabelValueLengthLimit defines + a global limit on the length of labels value per sample. The value + overrides any `spec.labelValueLengthLimit` set by ServiceMonitor, + PodMonitor, Probe objects unless `spec.labelValueLengthLimit` is + greater than zero and less than `spec.enforcedLabelValueLengthLimit`. + \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + enforcedNamespaceLabel: + description: "When not empty, a label will be added to \n 1. All metrics + scraped from `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig` + objects. 2. All metrics generated from recording rules defined in + `PrometheusRule` objects. 3. All alerts generated from alerting + rules defined in `PrometheusRule` objects. 4. All vector selectors + of PromQL expressions defined in `PrometheusRule` objects. \n The + label will not added for objects referenced in `spec.excludedFromEnforcement`. + \n The label's name is this field's value. The label's value is + the namespace of the `ServiceMonitor`, `PodMonitor`, `Probe` or + `PrometheusRule` object." + type: string + enforcedSampleLimit: + description: "When defined, enforcedSampleLimit specifies a global + limit on the number of scraped samples that will be accepted. This + overrides any `spec.sampleLimit` set by ServiceMonitor, PodMonitor, + Probe objects unless `spec.sampleLimit` is greater than zero and + less than than `spec.enforcedSampleLimit`. 
\n It is meant to be + used by admins to keep the overall number of samples/series under + a desired limit." + format: int64 + type: integer + enforcedTargetLimit: + description: "When defined, enforcedTargetLimit specifies a global + limit on the number of scraped targets. The value overrides any + `spec.targetLimit` set by ServiceMonitor, PodMonitor, Probe objects + unless `spec.targetLimit` is greater than zero and less than `spec.enforcedTargetLimit`. + \n It is meant to be used by admins to keep the overall number + of targets under a desired limit." + format: int64 + type: integer + excludedFromEnforcement: + description: "List of references to PodMonitor, ServiceMonitor, Probe + and PrometheusRule objects to be excluded from enforcing a namespace + label of origin. \n It is only applicable if `spec.enforcedNamespaceLabel` + is set to true." + items: + description: ObjectReference references a PodMonitor, ServiceMonitor, + Probe or PrometheusRule object. + properties: + group: + default: monitoring.coreos.com + description: Group of the referent. When not specified, it defaults + to `monitoring.coreos.com` + enum: + - monitoring.coreos.com + type: string + name: + description: Name of the referent. When not set, all resources + in the namespace are matched. + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + minLength: 1 + type: string + resource: + description: Resource of the referent. + enum: + - prometheusrules + - servicemonitors + - podmonitors + - probes + - scrapeconfigs + type: string + required: + - namespace + - resource + type: object + type: array + externalLabels: + additionalProperties: + type: string + description: The labels to add to any time series or alerts when communicating + with external systems (federation, remote storage, Alertmanager). + Labels defined by `spec.replicaExternalLabelName` and `spec.prometheusExternalLabelName` + take precedence over this list. + type: object + externalUrl: + description: The external URL under which the Prometheus service is + externally available. This is necessary to generate correct URLs + (for instance if Prometheus is accessible behind an Ingress resource). + type: string + hostAliases: + description: Optional list of hosts and IPs that will be injected + into the Pod's hosts file if specified. + items: + description: HostAlias holds the mapping between IP and hostnames + that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + required: + - hostnames + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostNetwork: + description: "Use the host's network namespace if true. \n Make sure + to understand the security implications if you want to enable it + (https://kubernetes.io/docs/concepts/configuration/overview/). \n + When hostNetwork is enabled, this will set the DNS policy to `ClusterFirstWithHostNet` + automatically." + type: boolean + ignoreNamespaceSelectors: + description: When true, `spec.namespaceSelector` from all PodMonitor, + ServiceMonitor and Probe objects will be ignored. They will only + discover targets within the namespace of the PodMonitor, ServiceMonitor + and Probe object. + type: boolean + image: + description: "Container image name for Prometheus. 
If specified, it + takes precedence over the `spec.baseImage`, `spec.tag` and `spec.sha` + fields. \n Specifying `spec.version` is still necessary to ensure + the Prometheus Operator knows which version of Prometheus is being + configured. \n If neither `spec.image` nor `spec.baseImage` are + defined, the operator will use the latest upstream version of Prometheus + available at the time when the operator was released." + type: string + imagePullPolicy: + description: Image pull policy for the 'prometheus', 'init-config-reloader' + and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: An optional list of references to Secrets in the same + namespace to use for pulling images from registries. See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: "InitContainers allows injecting initContainers to the + Pod definition. Those can be used to e.g. fetch secrets for injection + into the Prometheus configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart + of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + InitContainers described here modify an operator generated init + containers if they share the same name and modifications are done + via a strategic merge patch. \n The names of init container name + managed by the operator are: * `init-config-reloader`. \n Overriding + init containers is entirely outside the scope of what the maintainers + will support and by doing so, you accept that this behaviour may + break at any time without notice." + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
+ Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. 
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. 
This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. 
+ Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. 
AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. 
+ type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. 
The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + keepDroppedTargets: + description: "Per-scrape limit on the number of targets dropped by + relabeling that will be kept in memory. 0 means no limit. \n It + requires Prometheus >= v2.47.0." + format: int64 + type: integer + labelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. Only valid in Prometheus versions 2.45.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. Only valid in Prometheus versions 2.45.0 + and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. Only valid in Prometheus versions 2.45.0 + and newer. + format: int64 + type: integer + listenLocal: + description: When true, the Prometheus server listens on the loopback + address instead of the Pod IP's address. + type: boolean + logFormat: + description: Log format for Prometheus and the config-reloader + sidecar. + enum: + - "" + - logfmt + - json + type: string + logLevel: + description: Log level for Prometheus and the config-reloader sidecar. + enum: + - "" + - debug + - info + - warn + - error + type: string + maximumStartupDurationSeconds: + description: Defines the maximum time that the `prometheus` container's + startup probe will wait before being considered failed. The startup + probe will return success after the WAL replay is complete. If set, + the value should be greater than 60 (seconds). Otherwise it will + be equal to 600 seconds (15 minutes). 
+ format: int32 + minimum: 60 + type: integer + minReadySeconds: + description: "Minimum number of seconds for which a newly created + Pod should be ready without any of its container crashing for it + to be considered available. Defaults to 0 (pod will be considered + available as soon as it is ready) \n This is an alpha field from + kubernetes 1.22 until 1.24 which requires enabling the StatefulSetMinReadySeconds + feature gate." + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Defines on which Nodes the Pods are scheduled. + type: object + overrideHonorLabels: + description: When true, Prometheus resolves label conflicts by renaming + the labels in the scraped data to "exported_