diff --git a/.github/actions/remove_cluster/action.yml b/.github/actions/remove_cluster/action.yml index d332825..8e17ae7 100644 --- a/.github/actions/remove_cluster/action.yml +++ b/.github/actions/remove_cluster/action.yml @@ -13,7 +13,7 @@ runs: steps: - name: Delete RonDB Helmchart shell: bash - run: helm delete --namespace=${{ inputs.namespace }} ${{ inputs.helm_chart }} + run: helm delete --namespace=${{ inputs.namespace }} ${{ inputs.helm_chart }} || true # Can't just use delete Hook, since we want to print the logs of the Pod - name: Delete Helm Test Pods diff --git a/files/scripts/backups/metadata_upload_kubectl.sh b/files/scripts/backups/metadata_upload_kubectl.sh index 4816811..a9b0785 100755 --- a/files/scripts/backups/metadata_upload_kubectl.sh +++ b/files/scripts/backups/metadata_upload_kubectl.sh @@ -6,8 +6,6 @@ # for everything we want to back up. Root cannot be used over the network. set -e -{{ include "rondb.createRcloneConfig" . }} - kubectl exec \ $MYSQLD_PODNAME \ -c mysqld \ @@ -23,8 +21,8 @@ kubectl cp \ -c mysqld $LOCAL_BACKUP_DIR ls -la $LOCAL_BACKUP_DIR -{{ include "rondb.backups.defineJobNumberEnv" $ }} -REMOTE_BACKUP_DIR={{ include "rondb.rcloneBackupRemoteName" . }}:{{ .Values.backups.s3.bucketName }}/{{ include "rondb.takeBackupPathPrefix" . }}/$JOB_NUMBER +{{ include "rondb.backups.defineBackupIdEnv" $ }} +REMOTE_BACKUP_DIR={{ include "rondb.rcloneBackupRemoteName" . }}:{{include "rondb.backups.bucketName" (dict "backupConfig" .Values.backups "global" .Values.global)}}/{{ include "rondb.takeBackupPathPrefix" . 
}}/$BACKUP_ID echo && rclone ls $REMOTE_BACKUP_DIR echo "Copying backup from $LOCAL_BACKUP_DIR to $REMOTE_BACKUP_DIR" rclone move $LOCAL_BACKUP_DIR $REMOTE_BACKUP_DIR diff --git a/files/scripts/backups/native_upload_kubectl.sh b/files/scripts/backups/native_upload_kubectl.sh index f99e17b..fa7957e 100755 --- a/files/scripts/backups/native_upload_kubectl.sh +++ b/files/scripts/backups/native_upload_kubectl.sh @@ -10,9 +10,9 @@ wait_pids=() NUM_NODE_GROUPS={{ .Values.clusterSize.numNodeGroups }} NUM_REPLICAS={{ .Values.clusterSize.activeDataReplicas }} -{{ include "rondb.backups.defineJobNumberEnv" $ }} -SOURCE_DIR=/home/hopsworks/data/ndb/backups/BACKUP/BACKUP-$JOB_NUMBER -REMOTE_BACKUP_DIR={{ include "rondb.rcloneBackupRemoteName" . }}:{{ .Values.backups.s3.bucketName }}/{{ include "rondb.takeBackupPathPrefix" . }}/$JOB_NUMBER +{{ include "rondb.backups.defineBackupIdEnv" $ }} +SOURCE_DIR=/home/hopsworks/data/ndb/backups/BACKUP/BACKUP-$BACKUP_ID +REMOTE_BACKUP_DIR={{ include "rondb.rcloneBackupRemoteName" . }}:{{ include "rondb.backups.bucketName" (dict "backupConfig" .Values.backups "global" .Values.global) }}/{{ include "rondb.takeBackupPathPrefix" . }}/$BACKUP_ID echo "Uploading backups from '$SOURCE_DIR' to object storage $REMOTE_BACKUP_DIR in parallel" for ((g = 0; g < NUM_NODE_GROUPS; g++)); do @@ -85,3 +85,93 @@ if [ "$FAILED" = true ]; then exit 1 fi echo ">>> Succeeded uploading all backups" + +{{ $configMap := include "rondb.backups.metadataStore.configMapName" . }} +{{- if $configMap }} + +MAX_KEYS=5000 +MAX_SIZE_BYTES=900000 +BASE_CONFIGMAP={{ $configMap }} + +get_active_configmap(){ + kubectl get cm -n {{ .Release.Namespace }} -l "app=backups-metadata,service=rondb,managed-by=cronjob,active=active" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true +} + +log_stderr(){ + echo "$*" >&2 +} + +create_configmap_if_missing() { + local name="$1" + if ! 
kubectl get configmap "$name" -n {{ .Release.Namespace }} >/dev/null 2>&1; then + log_stderr "Creating ConfigMap $name" + kubectl create configmap "$name" -n {{ .Release.Namespace }} >/dev/null 2>&1 + kubectl label configmap "$name" -n {{ .Release.Namespace }} app=backups-metadata service=rondb managed-by=cronjob active=active --overwrite >/dev/null 2>&1 + fi +} + +rotate_if_needed() { + local cm="$1" + + local key_count size_bytes + key_count=$(kubectl get configmap "$cm" -n {{ .Release.Namespace }} -o json | jq '.data | length') + size_bytes=$(kubectl get configmap "$cm" -n {{ .Release.Namespace }} -o json | jq -r '.data' | wc -c) + + log_stderr "ConfigMap: $cm | Keys=$key_count | Size=${size_bytes}B" + + if (( key_count >= MAX_KEYS )) || (( size_bytes >= MAX_SIZE_BYTES )); then + log_stderr "Threshold exceeded, rotating ConfigMap..." + local suffix next_suffix new_cm + suffix="${cm#$BASE_CONFIGMAP-}" + if [[ "$suffix" =~ ^[0-9]+$ ]]; then + next_suffix=$((suffix + 1)) + else + next_suffix=1 + fi + new_cm="${BASE_CONFIGMAP}-${next_suffix}" + + # Create and label the new ConfigMap + create_configmap_if_missing "$new_cm" + + # Remove active label from old configmap + kubectl label configmap "$cm" -n {{ .Release.Namespace }} active- --overwrite >/dev/null 2>&1 || true + + log_stderr "Rotated to new ConfigMap: $new_cm" + echo "$new_cm" + else + echo "$cm" + fi +} + +ACTIVE_CM=$(get_active_configmap) +if [[ -z "$ACTIVE_CM" ]]; then + ACTIVE_CM="$BASE_CONFIGMAP" + echo "No active ConfigMap found. Creating $ACTIVE_CM ..." + create_configmap_if_missing "$ACTIVE_CM" +fi + +ACTIVE_CM=$(rotate_if_needed "$ACTIVE_CM") + +# Build backup metadata info json +echo "Updating backup metadata on ConfigMap $ACTIVE_CM " +START_TS=$(stat -c %Y {{ include "rondb.backups.backupIdFile" . 
}} | awk '{printf "%.3f", $1}') +END_TS=$(date +%s.%3N) + +DURATION_MS=$(awk -v start="$START_TS" -v end="$END_TS" 'BEGIN { printf "%.0f", (end - start) * 1000 }') + +START_TIME=$(date -u -d @"${START_TS%.*}" +"%Y-%m-%dT%H:%M:%S").$(printf "%03d" "${START_TS#*.}")Z +END_TIME=$(date -u -d @"${END_TS%.*}" +"%Y-%m-%dT%H:%M:%S").$(printf "%03d" "${END_TS#*.}")Z + +STATE="SUCCESS" + +PATCH_JSON=$(cat < "$BACKUP_FILE" + echo "Generated new backup ID: $BACKUP_ID" fi -echo "Job number: $JOB_NUMBER" {{- end }} {{- define "rondb.certManager.certificate.endToEnd" }} @@ -314,4 +316,173 @@ true {{- if and .Values.global .Values.global._hopsworks.externalServices .Values.global._hopsworks.externalServices.rondb .Values.global._hopsworks.externalServices.rondb.external -}} true {{- end -}} +{{- end -}} + +{{- define "rondb.backup.credentials" -}} +{{- if or (eq .backupConfig.objectStorageProvider "s3") (include "rondb.global.managedObjectStorage.s3" .) (include "rondb.global.minio" .)}} +{{- $secretName := "" }} +{{- $key := "" }} +{{- $optional := false }} +{{- if and .backupConfig.s3.keyCredentialsSecret.name .backupConfig.s3.keyCredentialsSecret.key }} +{{- $secretName = .backupConfig.s3.keyCredentialsSecret.name }} +{{- $key = .backupConfig.s3.keyCredentialsSecret.key }} +{{- $optional = false }} +{{- else if and (include "rondb.global.managedObjectStorage.s3" .) .global._hopsworks.managedObjectStorage.s3.secret .global._hopsworks.managedObjectStorage.s3.secret.name .global._hopsworks.managedObjectStorage.s3.secret.acess_key_id}} +{{- $secretName = .global._hopsworks.managedObjectStorage.s3.secret.name }} +{{- $key = .global._hopsworks.managedObjectStorage.s3.secret.acess_key_id }} +{{- $optional = false }} +{{- else if include "rondb.global.minio" . 
}} +{{- $secretName = "aws-credentials" }} +{{- $key = "access-key-id" }} +{{- $optional = true -}} +{{- end }} +{{- if or (lookup "v1" "Secret" .namespace $secretName ) $optional }} +- name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ $secretName }} + key: {{ $key }} + optional: {{ $optional }} +{{- end }} +{{- if and .backupConfig.s3.secretCredentialsSecret.name .backupConfig.s3.secretCredentialsSecret.key }} +{{- $secretName = .backupConfig.s3.secretCredentialsSecret.name }} +{{- $key = .backupConfig.s3.secretCredentialsSecret.key }} +{{- $optional = false }} +{{- else if and (include "rondb.global.managedObjectStorage.s3" .) .global._hopsworks.managedObjectStorage.s3.secret .global._hopsworks.managedObjectStorage.s3.secret.name .global._hopsworks.managedObjectStorage.s3.secret.secret_key_id}} +{{- $secretName = .global._hopsworks.managedObjectStorage.s3.secret.name }} +{{- $key = .global._hopsworks.managedObjectStorage.s3.secret.secret_key_id }} +{{- $optional = false }} +{{- else if include "rondb.global.minio" . }} +{{- $secretName = "aws-credentials" }} +{{- $key = "secret-access-key" }} +{{- $optional = true }} +{{- end }} +{{- if or (lookup "v1" "Secret" .namespace $secretName) $optional }} +- name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ $secretName }} + key: {{ $key }} + optional: {{ $optional }} +{{- end }} +{{- end }} +{{- end -}} + +{{- define "rondb.global.managedObjectStorage" -}} +{{- if and .global .global._hopsworks .global._hopsworks.managedObjectStorage .global._hopsworks.managedObjectStorage.enabled -}} +true +{{- end -}} +{{- end -}} + +{{- define "rondb.global.minio" -}} +{{- if and .global .global._hopsworks .global._hopsworks.minio .global._hopsworks.minio.enabled .global._hopsworks.minio.hopsfs .global._hopsworks.minio.hopsfs.enabled -}} +true +{{- end -}} +{{- end -}} + +{{- define "rondb.global.managedObjectStorage.s3" -}} +{{- if and (include "rondb.global.managedObjectStorage" .) 
.global._hopsworks.managedObjectStorage.s3 -}} +true +{{- end -}} +{{- end -}} + +{{- define "rondb.global.backupsEnabled" -}} +{{- if and ( or (include "rondb.global.managedObjectStorage" (dict "global" .Values.global)) (include "rondb.global.minio" (dict "global" .Values.global))) .Values.global._hopsworks.backups (hasKey .Values.global._hopsworks.backups "enabled" ) -}} +{{- if .Values.global._hopsworks.backups.enabled -}} +true +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "rondb.backups.isEnabled" -}} +{{- if hasKey .Values.backups "enabled" -}} +{{- if .Values.backups.enabled -}} +true +{{- end -}} +{{- else if include "rondb.global.backupsEnabled" . -}} +true +{{- end -}} +{{- end -}} + +# FIXME add a default value +{{- define "rondb.backups.schedule" -}} +{{- if and .Values.backups.enabled .Values.backups.schedule -}} +{{- .Values.backups.schedule -}} +{{- else if and (include "rondb.global.backupsEnabled" .) .Values.global._hopsworks.backups.schedule -}} +{{- .Values.global._hopsworks.backups.schedule -}} +{{- end -}} +{{- end -}} + +# FIXME should be changed when supporting multiple storage backends +{{- define "rondb.backups.bucketName" -}} +{{- if .backupConfig.s3.bucketName -}} +{{- .backupConfig.s3.bucketName -}} +{{- else if and (include "rondb.global.managedObjectStorage.s3" .) .global._hopsworks.managedObjectStorage.s3.bucket.name -}} +{{- .global._hopsworks.managedObjectStorage.s3.bucket.name -}} +{{- else if and (include "rondb.global.minio" .) .global._hopsworks.minio.hopsfs.bucket -}} +{{- .global._hopsworks.minio.hopsfs.bucket -}} +{{- else -}} +{{- fail "Missing bucket name configuration for backups. Please specify the bucket name either in the Rondb subchart or under global._hopsworks.managedObjectStorage." }} +{{- end -}} +{{- end -}} + + +{{- define "rondb.rcloneConfig" -}} +{{- if or (eq .backupConfig.objectStorageProvider "s3") (include "rondb.global.managedObjectStorage.s3" .) 
}} +type = s3 +env_auth = true +storage_class = STANDARD +{{- if .backupConfig.s3.provider }} +provider = {{ .backupConfig.s3.provider }} +{{- else if and .global .global._hopsworks (eq (upper .global._hopsworks.cloudProvider) "AWS")}} +provider = AWS +{{- else }} +provider = Other +{{- end }} +{{- if .backupConfig.s3.region }} +region = {{ .backupConfig.s3.region }} +{{- else if and (include "rondb.global.managedObjectStorage.s3" .) .global._hopsworks.managedObjectStorage.s3.region }} +region = {{ .global._hopsworks.managedObjectStorage.s3.region }} +{{- end }} +{{- if .backupConfig.s3.serverSideEncryption }} +server_side_encryption = {{ .backupConfig.s3.serverSideEncryption }} +{{- else if and (include "rondb.global.managedObjectStorage.s3" .) .global._hopsworks.managedObjectStorage.s3.serverSideEncryption }} +server_side_encryption = {{ .global._hopsworks.managedObjectStorage.s3.serverSideEncryption }} +{{- end }} +{{- if .backupConfig.s3.endpoint }} +endpoint = {{ .backupConfig.s3.endpoint }} +{{- else if and (include "rondb.global.managedObjectStorage.s3" .) .global._hopsworks.managedObjectStorage.s3.endpoint }} +endpoint = {{ .global._hopsworks.managedObjectStorage.s3.endpoint }} +{{- end }} +{{- else if include "rondb.global.minio" . 
}} +type = s3 +env_auth = true +storage_class = STANDARD +region = {{ .global._hopsworks.minio.region }} +provider = Other +endpoint = http://minio.service.consul:9000 +{{- end }} +{{- end -}} + + +{{- define "rondb.restoreFromBackup.backupId" -}} +{{- if .Values.restoreFromBackup.backupId -}} +{{- .Values.restoreFromBackup.backupId -}} +{{- else if and (include "rondb.global.managedObjectStorage" (dict "global" .Values.global)) .Values.global._hopsworks.restoreFromBackup .Values.global._hopsworks.restoreFromBackup.backupId -}} +{{- .Values.global._hopsworks.restoreFromBackup.backupId -}} +{{- end -}} +{{- end -}} + +{{- define "rondb.backups.metadataStore.configMapName" -}} +{{- if .Values.backups.metadataConfigmapName -}} +{{- .Values.backups.metadataConfigmapName -}} +{{- else if and (include "rondb.global.backupsEnabled" .) .Values.global._hopsworks.backups.metadataStore.configMap.ronDB -}} +{{- .Values.global._hopsworks.backups.metadataStore.configMap.ronDB -}} +{{- end -}} +{{- end -}} + +{{- define "rondb.backups.pathScheme" -}} +{{- if or (eq .Values.backups.objectStorageProvider "s3") (include "rondb.global.managedObjectStorage.s3" (dict "global" .Values.global)) (include "rondb.global.minio" (dict "global" .Values.global)) -}} +{{- "s3:/" -}} +{{- end -}} {{- end -}} \ No newline at end of file diff --git a/templates/shared_templates/_scripts.tpl b/templates/shared_templates/_scripts.tpl index 4cdc882..dac9a75 100644 --- a/templates/shared_templates/_scripts.tpl +++ b/templates/shared_templates/_scripts.tpl @@ -5,42 +5,10 @@ NODE_ID_OFFSET=$(($NODE_GROUP*3)) NODE_ID=$(($NODE_ID_OFFSET+$POD_ID+1)) {{- end -}} -{{- define "rondb.createRcloneConfig" -}} -echo "Location of rclone config file:" -rclone config file - -echo "Templating file $RCLONE_MOUNT_FILEPATH to $RCLONE_CONFIG" -cp $RCLONE_MOUNT_FILEPATH $RCLONE_CONFIG - -# Helper function to escape special characters in the variable -escape_sed() { - echo "$1" | sed -e 's/[\/&]/\\&/g' -} - -{{- if eq 
$.Values.restoreFromBackup.objectStorageProvider "s3" }} -# Escape the variables -ESCAPED_ACCESS_KEY_ID=$(escape_sed "$ACCESS_KEY_ID") -ESCAPED_SECRET_ACCESS_KEY=$(escape_sed "$SECRET_ACCESS_KEY") - -if [[ -z "$ACCESS_KEY_ID" ]]; then - # Use IAM Role instead - sed -i '/access_key_id/d' "$RCLONE_CONFIG" - sed -i '/secret_access_key/d' "$RCLONE_CONFIG" - sed -i 's/env_auth.*/env_auth = true/g' "$RCLONE_CONFIG" -else - sed -i "s|REG_ACCESS_KEY_ID|$ESCAPED_ACCESS_KEY_ID|g" "$RCLONE_CONFIG" - sed -i "s|REG_SECRET_ACCESS_KEY|$ESCAPED_SECRET_ACCESS_KEY|g" "$RCLONE_CONFIG" -fi -{{- end }} -{{- end }} - {{- define "rondb.mapNewNodesToBackedUpNodes" -}} -{{ include "rondb.createRcloneConfig" $ }} -{{- if eq $.Values.restoreFromBackup.objectStorageProvider "s3" }} -REMOTE_NATIVE_BACKUP_DIR={{ include "rondb.rcloneRestoreRemoteName" . }}:{{ $.Values.restoreFromBackup.s3.bucketName }}/{{ include "rondb.restoreBackupPathPrefix" . }}/$BACKUP_ID/rondb +REMOTE_NATIVE_BACKUP_DIR={{ include "rondb.rcloneRestoreRemoteName" . }}:{{ include "rondb.backups.bucketName" (dict "backupConfig" $.Values.restoreFromBackup "global" $.Values.global) }}/{{ include "rondb.restoreBackupPathPrefix" . 
}}/$BACKUP_ID/rondb echo "Path of remote (native) backup: $REMOTE_NATIVE_BACKUP_DIR" -{{- end }} DIRECTORY_NAMES=$(rclone lsd $REMOTE_NATIVE_BACKUP_DIR | awk '{print $NF}') OLD_NODE_IDS=($DIRECTORY_NAMES) diff --git a/templates/shared_templates/_vars.tpl b/templates/shared_templates/_vars.tpl index e8b10a6..0798ed4 100644 --- a/templates/shared_templates/_vars.tpl +++ b/templates/shared_templates/_vars.tpl @@ -38,10 +38,6 @@ backupRemote }} {{- end -}} -{{- define "rondb.rawRCloneConf" -}} -/home/hopsworks/rclone-raw.conf -{{- end -}} - --- {{- define "rondb.restoreNativeBackupJobname" -}} diff --git a/test_scripts/lifecycle-test.sh b/test_scripts/lifecycle-test.sh index 6aa68cc..065e1e5 100755 --- a/test_scripts/lifecycle-test.sh +++ b/test_scripts/lifecycle-test.sh @@ -143,6 +143,7 @@ helm upgrade -i $CLUSTER_B_NAME \ --values $backups_values_file \ --set "clusterSize.minNumRdrs=0" \ --set "backups.enabled=true" \ + --set "backups.schedule='@weekly'" \ --set "priorityClass=$CLUSTER_B_NAME" \ --set "mysql.credentialsSecretName=$MYSQL_SECRET_NAME" \ --set "mysql.supplyOwnSecret=true" \ @@ -170,7 +171,7 @@ deleteCluster $CLUSTER_A_NAME ######################### kubectl delete job -n $CLUSTER_B_NAME manual-backup || true -kubectl create job -n $CLUSTER_B_NAME --from=cronjob/create-backup manual-backup +kubectl create job -n $CLUSTER_B_NAME --from=cronjob/create-rondb-backup manual-backup bash .github/wait_job.sh $CLUSTER_B_NAME manual-backup 180 BACKUP_B_ID=$(getBackupId $CLUSTER_B_NAME) echo "BACKUP_B_ID is ${BACKUP_B_ID}" diff --git a/test_scripts/test_backup_restore.sh b/test_scripts/test_backup_restore.sh index ddb148e..2001961 100755 --- a/test_scripts/test_backup_restore.sh +++ b/test_scripts/test_backup_restore.sh @@ -50,7 +50,7 @@ setupFirstCluster() { helm test -n $namespace $RONDB_CLUSTER_NAME --logs --filter name=generate-data kubectl delete job -n $namespace manual-backup || true - kubectl create job -n $namespace --from=cronjob/create-backup manual-backup
+ kubectl create job -n $namespace --from=cronjob/create-rondb-backup manual-backup bash .github/wait_job.sh $namespace manual-backup 180 BACKUP_ID=$(getBackupId $namespace) echo "BACKUP_ID is ${BACKUP_ID}" diff --git a/values.schema.json b/values.schema.json index 810b751..0e3ffb0 100644 --- a/values.schema.json +++ b/values.schema.json @@ -1543,10 +1543,9 @@ "backupId": { "description": "The native backup ID for the backup to restore", "type": [ - "integer", + "string", "null" ], - "minimum": 1, "default": null }, "pathPrefix": { @@ -1556,9 +1555,10 @@ }, "objectStorageProvider": { "enum": [ - "s3" + "s3", + null ], - "default": "s3" + "default": null }, "s3": { "$ref": "#/$defs/s3" @@ -1587,8 +1587,8 @@ "description": "Whether, how and how often to run regular backups on the cluster", "properties": { "enabled": { - "type": "boolean", - "default": true + "type": ["boolean", "null"], + "default": null }, "pathPrefix": { "description": "Prefix of RonDB backup in the configured bucket", @@ -1597,22 +1597,25 @@ }, "schedule": { "description": "Cron schedule for backups", - "type": "string", - "default": "@weekly" + "type": ["string", "null"], + "default": null }, "objectStorageProvider": { "enum": [ - "s3" + "s3", + null ], - "default": "s3" + "default": null }, "s3": { "$ref": "#/$defs/s3" + }, + "metadataConfigmapName": { + "description": "The name of the configmap to be used to store the backups metadata information.", + "type": ["string", "null"], + "default": null } - }, - "required": [ - "enabled" - ] + } }, "globalReplication": { "type": "object", @@ -1819,8 +1822,8 @@ "type": "object", "properties": { "provider": { - "type": "string", - "default": "AWS" + "type": ["string", "null"], + "default": null }, "endpoint": { "type": [ @@ -1830,12 +1833,12 @@ "default": null }, "bucketName": { - "type": "string", - "default": "rondb-helm" + "type": ["string", "null"], + "default": null }, "region": { - "type": "string", - "default": "eu-north-1" + "type": ["string", 
"null"], + "default": null }, "serverSideEncryption": { "enum": [ @@ -1850,16 +1853,16 @@ "type": "object", "properties": { "name": { - "type": "string", + "type": ["string", "null"], "description": "Name of the Secret", "minLength": 1, - "default": "aws-credentials" + "default": null }, "key": { - "type": "string", + "type": ["string", "null"], "description": "Key in the Secret", "minLength": 1, - "default": "key_id" + "default": null } } }, @@ -1867,16 +1870,16 @@ "type": "object", "properties": { "name": { - "type": "string", + "type": ["string", "null"], "description": "Name of the Secret", "minLength": 1, - "default": "aws-credentials" + "default": null }, "key": { - "type": "string", + "type": ["string", "null"], "description": "Key in the Secret", "minLength": 1, - "default": "access_key" + "default": null } } } diff --git a/values.yaml b/values.yaml index b48fe55..e84c56d 100644 --- a/values.yaml +++ b/values.yaml @@ -5,22 +5,23 @@ # Schema JSON files allow defining restrictions, enums and references. # Use script .github/json_to_yaml.py to generate this file. 
backups: - enabled: true - objectStorageProvider: s3 + enabled: null + objectStorageProvider: null pathPrefix: rondb_backup s3: - bucketName: rondb-helm + bucketName: null endpoint: null keyCredentialsSecret: - key: key_id - name: aws-credentials - provider: AWS - region: eu-north-1 + key: null + name: null + provider: null + region: null secretCredentialsSecret: - key: access_key - name: aws-credentials + key: null + name: null serverSideEncryption: null - schedule: '@weekly' + schedule: null + metadataConfigmapName: null benchmarking: dbt2: numWarehouses: 4 @@ -289,19 +290,19 @@ restoreFromBackup: backupId: null excludeDatabases: [] excludeTables: [] - objectStorageProvider: s3 + objectStorageProvider: null pathPrefix: rondb_backup s3: - bucketName: rondb-helm + bucketName: null endpoint: null keyCredentialsSecret: - key: key_id - name: aws-credentials - provider: AWS - region: eu-north-1 + key: null + name: null + provider: null + region: null secretCredentialsSecret: - key: access_key - name: aws-credentials + key: null + name: null serverSideEncryption: null rondbConfig: ActivateRateLimits: 0 diff --git a/values/dummy_lint.yaml b/values/dummy_lint.yaml index ba2757e..2102cfb 100644 --- a/values/dummy_lint.yaml +++ b/values/dummy_lint.yaml @@ -20,6 +20,7 @@ backups: enabled: true s3: endpoint: rondb.com + bucketName: test benchmarking: enabled: true sysbench: @@ -136,12 +137,13 @@ resources: default: bla diskColumns: foo restoreFromBackup: - backupId: 1 + backupId: "1" excludeDatabases: *dummyStringArray excludeTables: *dummyStringArray objectStorageProvider: s3 s3: endpoint: rondb.com + bucketName: test rondbConfig: InitialTablespaceSizeGiB: 1 serviceAccountAnnotations: *dummyAnnotations