diff --git a/charts/cluster/Chart.yaml b/charts/cluster/Chart.yaml index 6e0bfd49b3..11c2f0c3a9 100644 --- a/charts/cluster/Chart.yaml +++ b/charts/cluster/Chart.yaml @@ -21,7 +21,7 @@ name: cluster description: Deploys and manages a CloudNativePG cluster and its associated resources. icon: https://raw.githubusercontent.com/cloudnative-pg/artwork/main/cloudnativepg-logo.svg type: application -version: 0.3.1 +version: 0.3.1-documentdb.3 sources: - https://github.com/cloudnative-pg/charts keywords: diff --git a/charts/cluster/examples/documentdb-auth-examples.yaml b/charts/cluster/examples/documentdb-auth-examples.yaml new file mode 100644 index 0000000000..ba1f9822d6 --- /dev/null +++ b/charts/cluster/examples/documentdb-auth-examples.yaml @@ -0,0 +1,280 @@ +# DocumentDB Authentication Configuration Examples + +# Option 1: Default Configuration (Secure File-Based Authentication) +# The chart automatically configures secure file-based PostgreSQL URL handling. +# Credentials are stored in a memory-backed volume and passed via --postgresql-url-file. +# +# Credential Handling: Best practice (prevents password exposure in logs/ps) +# Use Case: All environments (recommended default for password-based auth) +# Production Note: Combine with strong pg_hba rules (see Option 2 below) +--- +type: documentdb +mode: standalone +version: + postgresql: "17" + documentdb: "0.106.0" + ferretdb: "2.5.0" +cluster: + instances: 1 +ferretdb: + enabled: true +backups: + enabled: false + +# Option 2: pg_hba Host-Based Authentication - Recommended for Production +# Configure PostgreSQL to authenticate based on source IP/network. +# This is the most secure and flexible production approach. +# +# Use Case: Production, Staging with network isolation +--- +type: documentdb +mode: standalone +version: + postgresql: "17" + documentdb: "0.106.0" + ferretdb: "2.5.0" +cluster: + instances: 3 + postgresql: + pg_hba: + # Allow connections from pod network with SCRAM-SHA-256 (most secure) + - "hostssl all all 10.244.0.0/16 scram-sha-256" + # Or use md5 for broader compatibility + # - "hostssl all all 10.244.0.0/16 md5" + parameters: + # Enforce strong password encryption + password_encryption: "scram-sha-256" + # Require SSL connections + ssl: "on" + ssl_min_protocol_version: "TLSv1.3" + # Enable connection logging for security auditing + log_connections: "on" + log_disconnections: "on" +ferretdb: + enabled: true + instances: 2 +backups: + enabled: true + +# Option 3: Trust Authentication - Local Development ONLY +# WARNING: This is INSECURE! Use only for local testing. +# No password required - anyone who can reach the database can connect. +# +# Use Case: Local development on trusted networks only +--- +type: documentdb +mode: standalone +version: + postgresql: "17" + documentdb: "0.106.0" + ferretdb: "2.5.0" +cluster: + instances: 1 + postgresql: + pg_hba: + # DANGEROUS: No authentication required! + - "host all all 10.244.0.0/16 trust" +ferretdb: + enabled: true +backups: + enabled: false + +# Option 4: Mixed Authentication Rules +# Combine multiple pg_hba rules for different access patterns. 
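+# A note on rule order (a sketch, not specific to this chart): PostgreSQL reads
+# pg_hba.conf top to bottom and uses the first matching line, so list the most
+# specific rules first and keep any catch-all reject rule last. For example, with
+# the illustrative pair below the cert rule would never be reached, because the
+# /16 line already matches every address in the /24 subnet:
+#   - "hostssl all all 10.244.0.0/16 scram-sha-256"
+#   - "hostssl all all 10.244.1.0/24 cert"   # shadowed by the /16 rule above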
+#
+# Use Case: Complex environments with different access requirements
+---
+type: documentdb
+mode: standalone
+version:
+  postgresql: "17"
+  documentdb: "0.106.0"
+  ferretdb: "2.5.0"
+cluster:
+  instances: 3
+  postgresql:
+    pg_hba:
+      # FerretDB pods from specific subnet with strong auth
+      - "hostssl all all 10.244.0.0/24 scram-sha-256"
+      # Allow admin subnet with certificate authentication
+      - "hostssl all all 10.245.0.0/24 cert"
+      # Legacy app subnet with md5 (less secure, but compatible)
+      - "hostssl all all 10.246.0.0/24 md5"
+      # Reject all other connections explicitly
+      - "host all all 0.0.0.0/0 reject"
+    parameters:
+      password_encryption: "scram-sha-256"
+      ssl: "on"
+      ssl_min_protocol_version: "TLSv1.2"
+ferretdb:
+  enabled: true
+  instances: 2
+
+# Option 5: Custom FerretDB Configuration
+# Override FerretDB behavior with custom environment variables.
+#
+# Use Case: Custom FerretDB settings, debugging
+---
+type: documentdb
+mode: standalone
+version:
+  postgresql: "17"
+  documentdb: "0.106.0"
+  ferretdb: "2.5.0"
+cluster:
+  instances: 2
+ferretdb:
+  enabled: true
+  instances: 2
+  # Custom FerretDB image if needed
+  image: "ghcr.io/ferretdb/ferretdb"
+  tag: "2.5.0"
+  # Override resources
+  resources:
+    requests:
+      memory: "512Mi"
+      cpu: "250m"
+    limits:
+      memory: "1Gi"
+      cpu: "1000m"
+  # Add custom environment variables
+  # These can be used to modify FerretDB behavior
+  env:
+    - name: FERRETDB_LOG_LEVEL
+      value: "debug"
+    - name: FERRETDB_TELEMETRY
+      value: "disable"
+  # Note: PostgreSQL connection is managed via --postgresql-url-file flag
+  # The chart automatically creates a secure connection string from secrets
+
+# Option 6: Production Setup with All Security Features
+# Complete production-ready configuration with all security best practices.
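+# After deploying this option, a quick sanity check (illustrative commands only; they
+# assume the cluster is named documentdb-cluster as in the testing examples at the end
+# of this file) is to confirm the hardened settings were applied:
+#   kubectl exec documentdb-cluster-1 -- psql -U postgres -c "SHOW password_encryption;"
+#   kubectl exec documentdb-cluster-1 -- psql -U postgres -c "SHOW ssl_min_protocol_version;"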
+#
+# Use Case: Production deployments
+---
+type: documentdb
+mode: standalone
+version:
+  postgresql: "17"
+  documentdb: "0.106.0"
+  ferretdb: "2.5.0"
+cluster:
+  instances: 3
+  storage:
+    size: 100Gi
+    storageClass: fast-ssd
+  # Enable pod anti-affinity for high availability
+  affinity:
+    topologyKey: topology.kubernetes.io/zone
+  postgresql:
+    pg_hba:
+      # Only allow SSL connections from the pod network
+      - "hostssl all all 10.244.0.0/16 scram-sha-256"
+      # Explicitly reject all other connections (including non-SSL)
+      - "host all all 0.0.0.0/0 reject"
+    parameters:
+      # Security parameters
+      password_encryption: "scram-sha-256"
+      ssl: "on"
+      ssl_min_protocol_version: "TLSv1.3"
+      ssl_prefer_server_ciphers: "on"
+      # Connection limits (values must be strings)
+      max_connections: "200"
+      superuser_reserved_connections: "3"
+      # Logging for security auditing (failed logins are logged automatically)
+      log_connections: "on"
+      log_disconnections: "on"
+      log_statement: "ddl" # Log all DDL statements
+      # Performance tuning
+      shared_buffers: "4GB"
+      effective_cache_size: "12GB"
+      work_mem: "16MB"
+  # Enable monitoring
+  monitoring:
+    enabled: true
+    podMonitor:
+      enabled: true
+ferretdb:
+  enabled: true
+  instances: 3
+  resources:
+    requests:
+      memory: "512Mi"
+      cpu: "500m"
+    limits:
+      memory: "2Gi"
+      cpu: "2000m"
+backups:
+  enabled: true
+  provider: s3
+  s3:
+    region: us-east-1
+    bucket: production-backups
+    path: /documentdb
+    inheritFromIAMRole: true # Use IAM roles instead of access keys
+  scheduledBackups:
+    - name: daily-backup
+      schedule: "0 0 2 * * *" # Daily at 2 AM
+      backupOwnerReference: self
+  retentionPolicy: "30d"
+
+# How to Find Your Pod Network CIDR
+# To configure pg_hba rules, you need to know your Kubernetes pod network CIDR:
+#
+# Method 1: Check node pod CIDR allocation
+# kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
+#
+# Method 2: Check existing pod IPs
+# kubectl get pods -A -o wide | grep -v "IP" | awk '{print $6}' | sort -u
+#
+# Method 3: Check CNI configuration
+# kubectl get cm kube-proxy -n kube-system -o yaml | grep clusterCIDR
+#
+# Common pod CIDRs by Kubernetes distribution:
+# - kind: 10.244.0.0/16
+# - minikube: 172.17.0.0/16
+# - GKE: 10.0.0.0/8 (varies)
+# - EKS: 10.0.0.0/8 (varies)
+# - AKS: 10.244.0.0/16
+
+# Testing Authentication Configuration
+# After deploying, test your authentication setup:
+#
+# 1. Test PostgreSQL direct connection:
+# kubectl run psql-test --rm -it --image postgres:17 -- \
+#   psql "$(kubectl get secret documentdb-cluster-app -o jsonpath='{.data.uri}' | base64 -d)"
+#
+# 2. Test FerretDB MongoDB connection:
+# DB_USER=$(kubectl get secret documentdb-cluster-app -o jsonpath='{.data.username}' | base64 -d)
+# DB_PASSWORD=$(kubectl get secret documentdb-cluster-app -o jsonpath='{.data.password}' | base64 -d)
+# kubectl run mongo-test --rm -it --image mongo:7.0 -- \
+#   mongosh "mongodb://$DB_USER:$DB_PASSWORD@documentdb-cluster-ferretdb:27017/app"
+#
+# 3. Check authentication logs:
+# kubectl logs -l cnpg.io/cluster=documentdb-cluster | grep -i "authentication\|connection"
+#
+# 4. View pg_hba configuration:
+# kubectl exec documentdb-cluster-1 -- cat /var/lib/postgresql/data/pgdata/pg_hba.conf
+
+# Troubleshooting
+# Common Issues:
+#
+# 1. "fe_sendauth: no password supplied"
+# - Check that FerretDB can reach the PostgreSQL service
+# - Verify pg_hba rules allow connections from FerretDB pods
+# - Check FerretDB logs: kubectl logs -l app.kubernetes.io/component=ferretdb
+#
+# 2.
"no pg_hba.conf entry for host" +# - Your pg_hba rules don't match the source IP +# - Check actual pod IPs: kubectl get pods -o wide +# - Verify your CIDR includes the FerretDB pod IPs +# +# 3. "SCRAM authentication failed" +# - Password may be incorrect +# - Or password_encryption setting doesn't match pg_hba method +# - Check: kubectl get secret documentdb-cluster-app -o yaml +# +# 4. Connection timeout +# - Check if NetworkPolicy is blocking access +# - Verify FerretDB service: kubectl get svc documentdb-cluster-ferretdb +# - Test connectivity: kubectl exec -it -- nc -zv documentdb-cluster-rw 5432 diff --git a/charts/cluster/examples/documentdb.yaml b/charts/cluster/examples/documentdb.yaml new file mode 100644 index 0000000000..b5b2a09f76 --- /dev/null +++ b/charts/cluster/examples/documentdb.yaml @@ -0,0 +1,12 @@ +type: documentdb +mode: standalone +version: + postgresql: "17" + documentdb: "0.106.0" + ferretdb: "2.5.0" +cluster: + instances: 1 +ferretdb: + enabled: true +backups: + enabled: false diff --git a/charts/cluster/templates/_bootstrap.tpl b/charts/cluster/templates/_bootstrap.tpl index 95bedd214f..d200ffb84e 100644 --- a/charts/cluster/templates/_bootstrap.tpl +++ b/charts/cluster/templates/_bootstrap.tpl @@ -10,6 +10,41 @@ bootstrap: {{- if .Values.cluster.initdb.owner }} owner: {{ tpl .Values.cluster.initdb.owner . }} {{- end }} + {{- if eq .Values.type "documentdb" }} + # Both pg_cron and documentdb extensions must be created in postgres database + # See: https://blog.ferretdb.io/run-ferretdb-postgres-documentdb-extension-cnpg-kubernetes/ + {{- $owner := .Values.cluster.initdb.owner | default .Values.cluster.initdb.database | default "app" }} + postInitSQL: + - CREATE EXTENSION IF NOT EXISTS pg_cron CASCADE; + - CREATE EXTENSION IF NOT EXISTS documentdb CASCADE; + - GRANT documentdb_admin_role TO {{ $owner }}; + - GRANT USAGE ON SCHEMA documentdb_api TO {{ $owner }}; + - GRANT USAGE ON SCHEMA documentdb_core TO {{ $owner }}; + - GRANT USAGE ON SCHEMA documentdb_api_catalog TO {{ $owner }}; + - GRANT USAGE ON SCHEMA documentdb_api_internal TO {{ $owner }}; + - GRANT USAGE ON SCHEMA documentdb_data TO {{ $owner }}; + - GRANT ALL ON ALL TABLES IN SCHEMA documentdb_api TO {{ $owner }}; + - GRANT ALL ON ALL SEQUENCES IN SCHEMA documentdb_api TO {{ $owner }}; + - GRANT ALL ON ALL TABLES IN SCHEMA documentdb_core TO {{ $owner }}; + - GRANT ALL ON ALL SEQUENCES IN SCHEMA documentdb_core TO {{ $owner }}; + - GRANT ALL ON ALL TABLES IN SCHEMA documentdb_api_catalog TO {{ $owner }}; + - GRANT ALL ON ALL SEQUENCES IN SCHEMA documentdb_api_catalog TO {{ $owner }}; + - GRANT ALL ON ALL TABLES IN SCHEMA documentdb_api_internal TO {{ $owner }}; + - GRANT ALL ON ALL SEQUENCES IN SCHEMA documentdb_api_internal TO {{ $owner }}; + - GRANT ALL ON ALL TABLES IN SCHEMA documentdb_data TO {{ $owner }}; + - GRANT ALL ON ALL SEQUENCES IN SCHEMA documentdb_data TO {{ $owner }}; + - GRANT CREATE ON SCHEMA documentdb_data TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_api GRANT ALL ON TABLES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_api GRANT ALL ON SEQUENCES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_core GRANT ALL ON TABLES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_core GRANT ALL ON SEQUENCES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_api_catalog GRANT ALL ON TABLES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_api_catalog GRANT ALL ON SEQUENCES TO {{ $owner 
}}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_api_internal GRANT ALL ON TABLES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_api_internal GRANT ALL ON SEQUENCES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_data GRANT ALL ON TABLES TO {{ $owner }}; + - ALTER DEFAULT PRIVILEGES IN SCHEMA documentdb_data GRANT ALL ON SEQUENCES TO {{ $owner }}; + {{- end }} {{- if or (eq .Values.type "postgis") (eq .Values.type "timescaledb") (not (empty .Values.cluster.initdb.postInitApplicationSQL)) }} postInitApplicationSQL: {{- if eq .Values.type "postgis" }} diff --git a/charts/cluster/templates/_helpers.tpl b/charts/cluster/templates/_helpers.tpl index 2bae419493..34f60b75ba 100644 --- a/charts/cluster/templates/_helpers.tpl +++ b/charts/cluster/templates/_helpers.tpl @@ -87,6 +87,8 @@ If a custom imageName is available, use it, otherwise use the defaults based on {{- printf "ghcr.io/cloudnative-pg/postgresql:%s" .Values.version.postgresql -}} {{- else if eq .Values.type "postgis" -}} {{- printf "ghcr.io/cloudnative-pg/postgis:%s-%s" .Values.version.postgresql .Values.version.postgis -}} + {{- else if eq .Values.type "documentdb" -}} + {{- printf "ghcr.io/ferretdb/postgres-documentdb:%s-%s-ferretdb-%s" (include "cluster.postgresqlMajor" .) .Values.version.documentdb .Values.version.ferretdb -}} {{- else -}} {{ fail "Invalid cluster type!" }} {{- end }} @@ -127,6 +129,8 @@ Postgres UID {{- .Values.cluster.postgresUID }} {{- else if and (eq (include "cluster.useTimescaleDBDefaults" .) "true") (eq .Values.type "timescaledb") -}} {{- 1000 -}} + {{- else if eq .Values.type "documentdb" -}} + {{- 999 -}} {{- else -}} {{- 26 -}} {{- end -}} @@ -140,6 +144,8 @@ Postgres GID {{- .Values.cluster.postgresGID }} {{- else if and (eq (include "cluster.useTimescaleDBDefaults" .) "true") (eq .Values.type "timescaledb") -}} {{- 1000 -}} + {{- else if eq .Values.type "documentdb" -}} + {{- 999 -}} {{- else -}} {{- 26 -}} {{- end -}} diff --git a/charts/cluster/templates/cluster.yaml b/charts/cluster/templates/cluster.yaml index bb9ea770cc..a956d75b8e 100644 --- a/charts/cluster/templates/cluster.yaml +++ b/charts/cluster/templates/cluster.yaml @@ -68,10 +68,14 @@ spec: {{ end }} enablePDB: {{ .Values.cluster.enablePDB }} postgresql: - {{- if or (eq .Values.type "timescaledb") (not (empty .Values.cluster.postgresql.shared_preload_libraries)) }} + {{- if or (eq .Values.type "timescaledb") (eq .Values.type "documentdb") (not (empty .Values.cluster.postgresql.shared_preload_libraries)) }} shared_preload_libraries: {{- if eq .Values.type "timescaledb" }} - timescaledb + {{- else if eq .Values.type "documentdb" }} + - pg_cron + - pg_documentdb_core + - pg_documentdb {{- end }} {{- with .Values.cluster.postgresql.shared_preload_libraries }} {{- toYaml . | nindent 6 }} @@ -93,9 +97,16 @@ spec: synchronous: {{- toYaml . | nindent 6 }} {{ end }} - {{- with .Values.cluster.postgresql.parameters }} + {{- if or (eq .Values.type "documentdb") .Values.cluster.postgresql.parameters }} parameters: + {{- if eq .Values.type "documentdb" }} + # pg_cron should connect to postgres database (not app) + # See: https://blog.ferretdb.io/run-ferretdb-postgres-documentdb-extension-cnpg-kubernetes/ + cron.database_name: "postgres" + {{- end }} + {{- with .Values.cluster.postgresql.parameters }} {{- toYaml . 
| nindent 6 }} + {{- end }} {{- end }} {{- if not (and (empty .Values.cluster.roles) (empty .Values.cluster.services)) }} diff --git a/charts/cluster/templates/ferretdb.yaml b/charts/cluster/templates/ferretdb.yaml new file mode 100644 index 0000000000..20e1a32cb4 --- /dev/null +++ b/charts/cluster/templates/ferretdb.yaml @@ -0,0 +1,91 @@ +{{ if and (eq .Values.type "documentdb") .Values.ferretdb.enabled }} +{{- $dbOwner := "postgres" }} +{{- $dbName := .Values.ferretdb.database | default "postgres" }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "cluster.fullname" . }}-ferretdb + namespace: {{ include "cluster.namespace" . }} + labels: + {{- include "cluster.labels" . | nindent 4 }} + app.kubernetes.io/component: ferretdb +spec: + replicas: {{ .Values.ferretdb.instances }} + selector: + matchLabels: + {{- include "cluster.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: ferretdb + template: + metadata: + labels: + {{- include "cluster.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: ferretdb + spec: + containers: + - name: ferretdb + image: {{ .Values.ferretdb.image | default "ghcr.io/ferretdb/ferretdb" }}:{{ .Values.ferretdb.tag | default .Values.version.ferretdb }} + imagePullPolicy: {{ .Values.cluster.imagePullPolicy }} + ports: + - name: mongodb + containerPort: 27017 + protocol: TCP + args: + - --telemetry={{ .Values.ferretdb.telemetry | default "disable" }} + - --log-level={{ .Values.ferretdb.logLevel | default "info" }} + - --postgresql-url=postgresql://postgres:$(POSTGRES_PASSWORD)@{{ include "cluster.fullname" . }}-rw.{{ include "cluster.namespace" . }}:5432/postgres + {{- if and (hasKey .Values.ferretdb "auth") (not .Values.ferretdb.auth) }} + - --no-auth + {{- end }} + {{- if .Values.ferretdb.debugAddr }} + - --debug-addr={{ .Values.ferretdb.debugAddr }} + {{- end }} + {{- if .Values.ferretdb.otelTracesUrl }} + - --otel-traces-url={{ .Values.ferretdb.otelTracesUrl }} + {{- end }} + {{- range .Values.ferretdb.extraArgs }} + - {{ . }} + {{- end }} + env: + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "cluster.fullname" . }}-superuser + key: password + {{- with .Values.ferretdb.env }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ferretdb.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + livenessProbe: + tcpSocket: + port: 27017 + initialDelaySeconds: 10 + periodSeconds: 10 + readinessProbe: + tcpSocket: + port: 27017 + initialDelaySeconds: 5 + periodSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "cluster.fullname" . }}-ferretdb + namespace: {{ include "cluster.namespace" . }} + labels: + {{- include "cluster.labels" . | nindent 4 }} + app.kubernetes.io/component: ferretdb +spec: + type: ClusterIP + ports: + - name: mongodb + port: 27017 + targetPort: 27017 + protocol: TCP + selector: + {{- include "cluster.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: ferretdb +{{- end }} diff --git a/charts/cluster/test/documentdb-minio-backup-restore/00-minio_cleanup-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/00-minio_cleanup-assert.yaml new file mode 100644 index 0000000000..9c0f3eb480 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/00-minio_cleanup-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: minio-cleanup +status: + succeeded: 1 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/00-minio_cleanup.yaml b/charts/cluster/test/documentdb-minio-backup-restore/00-minio_cleanup.yaml new file mode 100644 index 0000000000..3dabf47c8a --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/00-minio_cleanup.yaml @@ -0,0 +1,16 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: minio-cleanup +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: minio-cleanup + image: minio/mc + command: ['sh', '-c'] + args: + - | + mc alias set myminio https://minio.minio.svc.cluster.local minio minio123 + mc rm --recursive --force myminio/mybucket/documentdb diff --git a/charts/cluster/test/documentdb-minio-backup-restore/01-documentdb_cluster-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/01-documentdb_cluster-assert.yaml new file mode 100644 index 0000000000..4203a94d27 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/01-documentdb_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: documentdb-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/01-documentdb_cluster.yaml b/charts/cluster/test/documentdb-minio-backup-restore/01-documentdb_cluster.yaml new file mode 100644 index 0000000000..ade7a789a8 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/01-documentdb_cluster.yaml @@ -0,0 +1,35 @@ +type: documentdb +mode: standalone +version: + postgresql: "17" + documentdb: "0.106.0" + ferretdb: "2.5.0" + +cluster: + instances: 2 + storage: + size: 256Mi + +ferretdb: + enabled: true + +backups: + enabled: true + + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/documentdb/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/documentdb-minio-backup-restore/03-documentdb_test-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/03-documentdb_test-assert.yaml new file mode 100644 index 0000000000..bcfd4be76a --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/03-documentdb_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: documentdb-test +status: + succeeded: 1 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/03-documentdb_test.yaml b/charts/cluster/test/documentdb-minio-backup-restore/03-documentdb_test.yaml new file mode 100644 index 0000000000..0d03621af3 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/03-documentdb_test.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: documentdb-test +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: documentdb-cluster-app + 
key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM pg_extension WHERE extname = '\''documentdb'\'')' --csv -q 2>/dev/null)" = "t" + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM pg_extension WHERE extname = '\''pg_cron'\'')' --csv -q 2>/dev/null)" = "t" \ No newline at end of file diff --git a/charts/cluster/test/documentdb-minio-backup-restore/03b-mongodb_client_test-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/03b-mongodb_client_test-assert.yaml new file mode 100644 index 0000000000..d55adaee71 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/03b-mongodb_client_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: documentdb-mongodb-client-test +status: + succeeded: 1 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/03b-mongodb_client_test.yaml b/charts/cluster/test/documentdb-minio-backup-restore/03b-mongodb_client_test.yaml new file mode 100644 index 0000000000..cb7ab76fe0 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/03b-mongodb_client_test.yaml @@ -0,0 +1,99 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: documentdb-mongodb-client-test +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: mongo-client-test + image: mongo:7.0 + env: + - name: FERRETDB_HOST + value: "documentdb-cluster-ferretdb" + - name: DB_USER + valueFrom: + secretKeyRef: + name: documentdb-cluster-app + key: username + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: documentdb-cluster-app + key: password + - name: DB_NAME + value: "app" + command: ['sh', '-c'] + args: + - | + # Wait a moment for the service to be ready + sleep 5 + + # Test MongoDB client connectivity and operations + mongosh "mongodb://$DB_USER:$DB_PASSWORD@$FERRETDB_HOST:27017/$DB_NAME?directConnection=true" --eval ' + print("Testing MongoDB client connectivity to DocumentDB..."); + + // Test 1: Insert a document + print("Test 1: Inserting document..."); + const insertResult = db.test_collection.insertOne({ + hello: "world", + timestamp: new Date(), + nested: { field: "value" }, + array: [1, 2, 3] + }); + print("Insert successful, _id: " + insertResult.insertedId); + + // Test 2: Find the document + print("Test 2: Finding document..."); + const doc = db.test_collection.findOne({ hello: "world" }); + if (!doc || doc.hello !== "world") { + throw new Error("Document not found or incorrect"); + } + print("Find successful: " + JSON.stringify(doc)); + + // Test 3: Update the document + print("Test 3: Updating document..."); + const updateResult = db.test_collection.updateOne( + { hello: "world" }, + { $set: { updated: true, updateTime: new Date() } } + ); + if (updateResult.modifiedCount !== 1) { + throw new Error("Update failed"); + } + print("Update successful"); + + // Test 4: Verify update + print("Test 4: Verifying update..."); + const updatedDoc = db.test_collection.findOne({ hello: "world" }); + if (!updatedDoc.updated) { + throw new Error("Update verification failed"); + } + print("Update verified"); + + // Test 5: Count documents + print("Test 5: Counting documents..."); + const count = db.test_collection.countDocuments({ hello: "world" }); + if (count !== 1) { + throw new Error("Count failed, expected 1 got " + count); + } + print("Count successful: " + count); + + // Test 6: Delete the document + print("Test 6: Deleting document..."); + const deleteResult = 
db.test_collection.deleteOne({ hello: "world" }); + if (deleteResult.deletedCount !== 1) { + throw new Error("Delete failed"); + } + print("Delete successful"); + + // Test 7: Verify deletion + print("Test 7: Verifying deletion..."); + const deletedDoc = db.test_collection.findOne({ hello: "world" }); + if (deletedDoc !== null) { + throw new Error("Document still exists after deletion"); + } + print("Deletion verified"); + + print("✓ All MongoDB client operations successful!"); + ' diff --git a/charts/cluster/test/documentdb-minio-backup-restore/04-data_write-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/04-data_write-assert.yaml new file mode 100644 index 0000000000..831f963d9d --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/04-data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +status: + succeeded: 1 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/04-data_write.yaml b/charts/cluster/test/documentdb-minio-backup-restore/04-data_write.yaml new file mode 100644 index 0000000000..222a8b82b2 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/04-data_write.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: configmap-creator-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: configmap-creator +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: configmap-creator-binding +subjects: +- kind: ServiceAccount + name: configmap-creator-sa +roleRef: + kind: Role + name: configmap-creator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write +spec: + template: + spec: + serviceAccountName: configmap-creator-sa + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: documentdb-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client kubectl coreutils + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + psql "$DB_URI" -c "CREATE TABLE mygoodtable (id serial PRIMARY KEY);" + sleep 5 + DATE_NO_BAD_TABLE=$(date --rfc-3339=ns) + kubectl create configmap date-no-bad-table --from-literal=date="$DATE_NO_BAD_TABLE" + sleep 5 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/05-backup.yaml b/charts/cluster/test/documentdb-minio-backup-restore/05-backup.yaml new file mode 100644 index 0000000000..69b675781a --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/05-backup.yaml @@ -0,0 +1,8 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + method: barmanObjectStore + cluster: + name: documentdb-cluster diff --git a/charts/cluster/test/documentdb-minio-backup-restore/05-backup_completed-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/05-backup_completed-assert.yaml new file mode 100644 index 0000000000..8ee91b03c8 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/05-backup_completed-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + cluster: + name: documentdb-cluster + method: barmanObjectStore +status: + phase: completed diff --git a/charts/cluster/test/documentdb-minio-backup-restore/05-backup_running-assert.yaml 
b/charts/cluster/test/documentdb-minio-backup-restore/05-backup_running-assert.yaml new file mode 100644 index 0000000000..7c5eeada59 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/05-backup_running-assert.yaml @@ -0,0 +1,10 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: post-init-backup +spec: + cluster: + name: documentdb-cluster + method: barmanObjectStore +status: + phase: running diff --git a/charts/cluster/test/documentdb-minio-backup-restore/05-checkpoint.yaml b/charts/cluster/test/documentdb-minio-backup-restore/05-checkpoint.yaml new file mode 100644 index 0000000000..2caac61a6b --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/05-checkpoint.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: backup-checkpoint +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: create-checkpoint + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: documentdb-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + END_TIME=$(( $(date +%s) + 30 )) + while [ $(date +%s) -lt $END_TIME ]; do + psql "$DB_URI" -c "SELECT pg_switch_wal();CHECKPOINT;" + sleep 5 + done diff --git a/charts/cluster/test/documentdb-minio-backup-restore/06-post_backup_data_write-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/06-post_backup_data_write-assert.yaml new file mode 100644 index 0000000000..ad9be77a7b --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/06-post_backup_data_write-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write-post-backup +status: + succeeded: 1 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/06-post_backup_data_write.yaml b/charts/cluster/test/documentdb-minio-backup-restore/06-post_backup_data_write.yaml new file mode 100644 index 0000000000..ec004c10cf --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/06-post_backup_data_write.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-write-post-backup +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-write + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: documentdb-cluster-superuser + key: uri + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + psql "$DB_URI" -c "CREATE TABLE mybadtable (id serial PRIMARY KEY);" diff --git a/charts/cluster/test/documentdb-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml new file mode 100644 index 0000000000..2b6b9651f1 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/07-recovery_backup_pitr_cluster-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: recovery-backup-pitr-cluster +status: + readyInstances: 2 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml b/charts/cluster/test/documentdb-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml new file mode 100644 index 0000000000..e18910a0db --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/07-recovery_backup_pitr_cluster.yaml 
@@ -0,0 +1,48 @@ +type: documentdb +mode: recovery + +cluster: + instances: 2 + storage: + size: 256Mi + +recovery: + method: backup + backupName: "post-init-backup" + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/documentdb/v1" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" + +backups: + enabled: true + provider: s3 + endpointURL: "https://minio.minio.svc.cluster.local" + endpointCA: + name: kube-root-ca.crt + key: ca.crt + wal: + encryption: "" + data: + encryption: "" + s3: + bucket: "mybucket" + path: "/documentdb/v2" + accessKey: "minio" + secretKey: "minio123" + region: "local" + scheduledBackups: [] + retentionPolicy: "30d" diff --git a/charts/cluster/test/documentdb-minio-backup-restore/08-data_test-assert.yaml b/charts/cluster/test/documentdb-minio-backup-restore/08-data_test-assert.yaml new file mode 100644 index 0000000000..6f14d5f231 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/08-data_test-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup-pitr +status: + succeeded: 1 diff --git a/charts/cluster/test/documentdb-minio-backup-restore/08-data_test.yaml b/charts/cluster/test/documentdb-minio-backup-restore/08-data_test.yaml new file mode 100644 index 0000000000..5fb4faf395 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/08-data_test.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: data-test-backup-pitr +spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: data-test + env: + - name: DB_URI + valueFrom: + secretKeyRef: + name: recovery-backup-pitr-cluster-superuser + key: uri + image: alpine:3.19 + command: ['sh', '-c'] + args: + - | + apk --no-cache add postgresql-client + DB_URI=$(echo $DB_URI | sed "s|/\*|/|" ) + set -e + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mygoodtable$$)' --csv -q 2>/dev/null)" = "t" + echo "Good table exists" + test "$(psql $DB_URI -t -c 'SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = $$mybadtable$$)' --csv -q 2>/dev/null)" = "f" + echo "Bad table does not exist" diff --git a/charts/cluster/test/documentdb-minio-backup-restore/chainsaw-test.yaml b/charts/cluster/test/documentdb-minio-backup-restore/chainsaw-test.yaml new file mode 100644 index 0000000000..e88679d9c8 --- /dev/null +++ b/charts/cluster/test/documentdb-minio-backup-restore/chainsaw-test.yaml @@ -0,0 +1,144 @@
+##
+# This test sets up a documentdb cluster with MinIO backups and ensures that the documentdb extensions are installed
+# and that PITR recovery is enabled and working.
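+#
+# A rough sketch of running this suite locally (assumes the chainsaw CLI is installed and the
+# target cluster already has the CloudNativePG operator plus a MinIO instance reachable at
+# minio.minio.svc.cluster.local with the credentials used in these files):
+#   chainsaw test charts/cluster/test/documentdb-minio-backup-restore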
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: documentdb +spec: + timeouts: + apply: 1s + assert: 5m + cleanup: 1m + steps: + - name: Clear the MinIO bucket + try: + - apply: + file: ./00-minio_cleanup.yaml + - assert: + file: ./00-minio_cleanup-assert.yaml + - name: Install a standalone documentdb cluster + try: + - script: + content: | + kubectl -n $NAMESPACE create secret generic kube-root-ca.crt --from-literal=ca.crt="$(kubectl -n kube-system get configmaps kube-root-ca.crt -o jsonpath='{.data.ca\.crt}')" --dry-run=client -o yaml | kubectl apply -f - + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./01-documentdb_cluster.yaml \ + --wait \ + documentdb ../../ + - assert: + file: ./01-documentdb_cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - podLogs: + selector: cnpg.io/cluster=documentdb-cluster + - name: Verify documentdb extensions are installed + timeouts: + apply: 1s + assert: 30s + try: + - apply: + file: 03-documentdb_test.yaml + - assert: + file: 03-documentdb_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=documentdb-test + - name: Verify MongoDB client connectivity and operations + timeouts: + apply: 1s + assert: 60s + try: + - apply: + file: 03b-mongodb_client_test.yaml + - assert: + file: 03b-mongodb_client_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=documentdb-mongodb-client-test + - name: Write some data to the cluster + timeouts: + apply: 1s + assert: 30s + try: + - apply: + file: 04-data_write.yaml + - assert: + file: 04-data_write-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + - podLogs: + selector: batch.kubernetes.io/job-name=data-test + - name: Create a backup + try: + - apply: + file: ./05-backup.yaml + - assert: + file: ./05-backup_running-assert.yaml + - apply: + file: ./05-checkpoint.yaml + - assert: + file: ./05-backup_completed-assert.yaml + - name: Write more data to the database after the backup + try: + - apply: + file: ./06-post_backup_data_write.yaml + - assert: + file: ./06-post_backup_data_write-assert.yaml + timeouts: + apply: 1s + assert: 10m + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Backup + - name: Create a recovery cluster from backup with a PITR target + try: + - script: + content: | + DATE_NO_BAD_TABLE=$(kubectl -n $NAMESPACE get configmap date-no-bad-table -o 'jsonpath={.data.date}') + helm upgrade \ + --install \ + --namespace $NAMESPACE \ + --values ./07-recovery_backup_pitr_cluster.yaml \ + --set recovery.pitrTarget.time="$DATE_NO_BAD_TABLE" \ + --wait \ + recovery-backup-pitr ../../ + - assert: + file: ./07-recovery_backup_pitr_cluster-assert.yaml + catch: + - describe: + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + - podLogs: + selector: cnpg.io/cluster=recovery-backup-pitr-cluster + - name: Verify the pre-backup data on the recovery cluster exists but not the post-backup data + try: + - apply: + file: 08-data_test.yaml + - assert: + file: 08-data_test-assert.yaml + catch: + - describe: + apiVersion: batch/v1 + kind: Job + selector: batch.kubernetes.io/job-name=data-test-backup-pitr + - podLogs: + selector: batch.kubernetes.io/job-name=data-test-backup-pitr + - name: Cleanup + try: + - script: + content: | + helm uninstall --namespace $NAMESPACE documentdb diff --git a/charts/cluster/values.yaml b/charts/cluster/values.yaml 
index 3d5f11dc1a..272cd4e040 100644 --- a/charts/cluster/values.yaml +++ b/charts/cluster/values.yaml @@ -10,6 +10,7 @@ namespaceOverride: "" # * `postgresql` # * `postgis` # * `timescaledb` +# * `documentdb` type: postgresql version: @@ -19,6 +20,10 @@ version: timescaledb: "2.15" # -- If using PostGIS, specify the version postgis: "3.4" + # -- If using DocumentDB, specify the version + documentdb: "0.106.0" + # -- If using DocumentDB, specify the FerretDB version + ferretdb: "2.5.0" ### # -- Cluster mode of operation. Available modes: @@ -482,6 +487,48 @@ imageCatalog: # - image: ghcr.io/your_repo/your_image:your_tag # major: 16 +# -- FerretDB gateway (DocumentDB only) +# Provides MongoDB wire protocol (port 27017) for existing MongoDB applications +ferretdb: + # -- Enable FerretDB gateway (only for type: documentdb) + enabled: false + # -- Number of FerretDB instances + instances: 1 + # -- FerretDB image (defaults to ghcr.io/ferretdb/ferretdb) + image: "" + # -- FerretDB image tag (defaults to version.ferretdb) + tag: "" + # -- Container resources + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + # -- FerretDB log level (debug, info, warn, error) + logLevel: "info" + # -- Enable/disable FerretDB telemetry + telemetry: "disable" + # -- PostgreSQL connection SSL mode (disable, require, verify-ca, verify-full) + # Uncomment to override default SSL behavior + # sslMode: "require" + # -- Enable authentication (default: true). Set to false to disable + # auth: true + # -- FerretDB operation mode (default: normal, options: diff-normal, proxy) + # mode: "normal" + # -- Expose debug/metrics endpoint (e.g. ":8088") + # debugAddr: ":8088" + # -- OpenTelemetry traces URL for distributed tracing + # otelTracesUrl: "" + # -- Additional FerretDB command-line arguments (as list) + # extraArgs: [] + # - "--listen-tls=:27018" + # - "--listen-tls-cert-file=/path/to/cert" + # - "--listen-tls-key-file=/path/to/key" + # -- Additional environment variables + env: [] + # -- List of PgBouncer poolers poolers: [] # -