diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ff88e266..b64a359d 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -282,7 +282,9 @@ jobs: set -xe echo "CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION}" echo "GCS_TESTS=${GCS_TESTS}" - + GCS_ENCRYPTION_KEY=$(openssl rand -base64 32) + export GCS_ENCRYPTION_KEY + chmod +x $(pwd)/clickhouse-backup/clickhouse-backup* if [[ "${CLICKHOUSE_VERSION}" =~ 2[2-9]+ ]]; then diff --git a/ReadMe.md b/ReadMe.md index 4257be1f..94fb6324 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -304,6 +304,10 @@ gcs: custom_storage_class_map: {} debug: false # GCS_DEBUG force_http: false # GCS_FORCE_HTTP + # GCS_ENCRYPTION_KEY, base64-encoded 256-bit key for customer-supplied encryption (CSEK) + # This encrypts backup data at rest using a key you control. Generate with: `openssl rand -base64 32` + # See https://cloud.google.com/storage/docs/encryption/customer-supplied-keys + encryption_key: "" cos: url: "" # COS_URL timeout: 2m # COS_TIMEOUT diff --git a/pkg/config/config.go b/pkg/config/config.go index cc59b64b..90cc09a2 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -113,6 +113,9 @@ type GCSConfig struct { // UploadConcurrency or DownloadConcurrency in each upload and download case ClientPoolSize int `yaml:"client_pool_size" envconfig:"GCS_CLIENT_POOL_SIZE"` ChunkSize int `yaml:"chunk_size" envconfig:"GCS_CHUNK_SIZE"` + // EncryptionKey is a base64-encoded 256-bit customer-supplied encryption key (CSEK) + // for client-side encryption of objects. Use `openssl rand -base64 32` to generate. + EncryptionKey string `yaml:"encryption_key" envconfig:"GCS_ENCRYPTION_KEY"` } // AzureBlobConfig - Azure Blob settings section diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 36ec263f..1345d700 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -26,9 +26,10 @@ import ( // GCS - presents methods for manipulate data on GCS type GCS struct { - client *storage.Client - Config *config.GCSConfig - clientPool *pool.ObjectPool + client *storage.Client + Config *config.GCSConfig + clientPool *pool.ObjectPool + encryptionKey []byte // Customer-Supplied Encryption Key (CSEK) } type debugGCSTransport struct { @@ -188,7 +189,24 @@ func (gcs *GCS) Connect(ctx context.Context) error { gcs.clientPool = pool.NewObjectPoolWithDefaultConfig(ctx, factory) gcs.clientPool.Config.MaxTotal = gcs.Config.ClientPoolSize * 3 gcs.client, err = storage.NewClient(ctx, storageClientOptions...) - return err + if err != nil { + return err + } + + // Validate and decode the encryption key if provided + if gcs.Config.EncryptionKey != "" { + key, err := base64.StdEncoding.DecodeString(gcs.Config.EncryptionKey) + if err != nil { + return errors.Wrap(err, "gcs: malformed encryption_key, must be base64-encoded 256-bit key") + } + if len(key) != 32 { + return fmt.Errorf("gcs: malformed encryption_key, must be base64-encoded 256-bit key (got %d bytes)", len(key)) + } + gcs.encryptionKey = key + log.Info().Msg("GCS: Customer-Supplied Encryption Key (CSEK) configured") + } + + return nil } func (gcs *GCS) Close(ctx context.Context) error { @@ -196,6 +214,14 @@ func (gcs *GCS) Close(ctx context.Context) error { return gcs.client.Close() } +// applyEncryption returns an ObjectHandle with encryption key applied if configured +func (gcs *GCS) applyEncryption(obj *storage.ObjectHandle) *storage.ObjectHandle { + if gcs.encryptionKey != nil { + return obj.Key(gcs.encryptionKey) + } + return obj +} + func (gcs *GCS) Walk(ctx context.Context, gcsPath string, recursive bool, process func(ctx context.Context, r RemoteFile) error) error { rootPath := path.Join(gcs.Config.Path, gcsPath) return gcs.WalkAbsolute(ctx, rootPath, recursive, process) @@ -252,7 +278,7 @@ func (gcs *GCS) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadC return nil, err } pClient := pClientObj.(*clientObject).Client - obj := pClient.Bucket(gcs.Config.Bucket).Object(key) + obj := gcs.applyEncryption(pClient.Bucket(gcs.Config.Bucket).Object(key)) reader, err := obj.NewReader(ctx) if err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { @@ -281,7 +307,7 @@ func (gcs *GCS) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser return err } pClient := pClientObj.(*clientObject).Client - obj := pClient.Bucket(gcs.Config.Bucket).Object(key) + obj := gcs.applyEncryption(pClient.Bucket(gcs.Config.Bucket).Object(key)) // always retry transient errors to mitigate retry logic bugs. obj = obj.Retryer(storage.WithPolicy(storage.RetryAlways)) writer := obj.NewWriter(ctx) @@ -314,7 +340,8 @@ func (gcs *GCS) StatFile(ctx context.Context, key string) (RemoteFile, error) { } func (gcs *GCS) StatFileAbsolute(ctx context.Context, key string) (RemoteFile, error) { - objAttr, err := gcs.client.Bucket(gcs.Config.Bucket).Object(key).Attrs(ctx) + obj := gcs.applyEncryption(gcs.client.Bucket(gcs.Config.Bucket).Object(key)) + objAttr, err := obj.Attrs(ctx) if err != nil { if errors.Is(err, storage.ErrObjectNotExist) { return nil, ErrNotFound @@ -369,7 +396,7 @@ func (gcs *GCS) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey } pClient := pClientObj.(*clientObject).Client src := pClient.Bucket(srcBucket).Object(srcKey) - dst := pClient.Bucket(gcs.Config.Bucket).Object(dstKey) + dst := gcs.applyEncryption(pClient.Bucket(gcs.Config.Bucket).Object(dstKey)) // always retry transient errors to mitigate retry logic bugs. dst = dst.Retryer(storage.WithPolicy(storage.RetryAlways)) attrs, err := src.Attrs(ctx) @@ -379,7 +406,10 @@ func (gcs *GCS) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey } return 0, err } - if _, err = dst.CopierFrom(src).Run(ctx); err != nil { + copier := dst.CopierFrom(src) + // If encryption is enabled, the destination will be encrypted + // Note: source objects from object disks are not encrypted by clickhouse-backup + if _, err = copier.Run(ctx); err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { log.Warn().Msgf("gcs.CopyObject: gcs.clientPool.InvalidateObject error: %+v", pErr) } diff --git a/pkg/storage/gcs_test.go b/pkg/storage/gcs_test.go new file mode 100644 index 00000000..b435729d --- /dev/null +++ b/pkg/storage/gcs_test.go @@ -0,0 +1,184 @@ +package storage + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/Altinity/clickhouse-backup/v2/pkg/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGCSEncryptionKeyValidation(t *testing.T) { + testCases := []struct { + name string + encryptionKey string + expectError bool + errorContains string + }{ + { + name: "empty key is valid (no encryption)", + encryptionKey: "", + expectError: false, + }, + { + name: "valid 256-bit key", + encryptionKey: base64.StdEncoding.EncodeToString(make([]byte, 32)), + expectError: false, + }, + { + name: "valid 256-bit key with random data", + encryptionKey: "dGhpcyBpcyBhIDMyIGJ5dGUga2V5ISEhISEhISEhISE=", // "this is a 32 byte key!!!!!!!!!!!" (32 bytes) base64 + expectError: false, + }, + { + name: "invalid base64", + encryptionKey: "not-valid-base64!!!", + expectError: true, + errorContains: "malformed encryption_key", + }, + { + name: "key too short (16 bytes / 128-bit)", + encryptionKey: base64.StdEncoding.EncodeToString(make([]byte, 16)), + expectError: true, + errorContains: "got 16 bytes", + }, + { + name: "key too long (64 bytes / 512-bit)", + encryptionKey: base64.StdEncoding.EncodeToString(make([]byte, 64)), + expectError: true, + errorContains: "got 64 bytes", + }, + { + name: "key slightly short (31 bytes)", + encryptionKey: base64.StdEncoding.EncodeToString(make([]byte, 31)), + expectError: true, + errorContains: "got 31 bytes", + }, + { + name: "key slightly long (33 bytes)", + encryptionKey: base64.StdEncoding.EncodeToString(make([]byte, 33)), + expectError: true, + errorContains: "got 33 bytes", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + gcs := &GCS{ + Config: &config.GCSConfig{ + EncryptionKey: tc.encryptionKey, + // These are required for Connect but we'll test key validation + // before actual connection by checking the error + Bucket: "test-bucket", + SkipCredentials: true, + }, + } + + // We can't fully test Connect without a GCS server, but we can + // validate the key parsing logic by checking if the error is + // related to key validation vs connection issues + err := gcs.validateAndDecodeEncryptionKey() + + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorContains) + assert.Nil(t, gcs.encryptionKey) + } else { + require.NoError(t, err) + if tc.encryptionKey != "" { + assert.NotNil(t, gcs.encryptionKey) + assert.Len(t, gcs.encryptionKey, 32) + } else { + assert.Nil(t, gcs.encryptionKey) + } + } + }) + } +} + +func TestGCSApplyEncryption(t *testing.T) { + t.Run("returns same object when no encryption key", func(t *testing.T) { + gcs := &GCS{ + Config: &config.GCSConfig{}, + encryptionKey: nil, + } + + // We can't create a real ObjectHandle without a client, but we can + // verify the logic by checking the nil case + result := gcs.applyEncryption(nil) + assert.Nil(t, result) + }) + + t.Run("encryption key is set correctly", func(t *testing.T) { + key := make([]byte, 32) + for i := range key { + key[i] = byte(i) + } + + gcs := &GCS{ + Config: &config.GCSConfig{}, + encryptionKey: key, + } + + // Verify the key is stored correctly + assert.Equal(t, key, gcs.encryptionKey) + assert.Len(t, gcs.encryptionKey, 32) + }) +} + +func TestGCSEncryptionKeyDecoding(t *testing.T) { + t.Run("correctly decodes valid base64 key", func(t *testing.T) { + // Create a known 32-byte key + originalKey := []byte("12345678901234567890123456789012") // exactly 32 bytes + encodedKey := base64.StdEncoding.EncodeToString(originalKey) + + gcs := &GCS{ + Config: &config.GCSConfig{ + EncryptionKey: encodedKey, + }, + } + + err := gcs.validateAndDecodeEncryptionKey() + require.NoError(t, err) + assert.Equal(t, originalKey, gcs.encryptionKey) + }) + + t.Run("handles URL-safe base64 encoding", func(t *testing.T) { + // Standard base64 with potential + and / characters + originalKey := make([]byte, 32) + for i := range originalKey { + originalKey[i] = byte(i * 8) // Will produce various characters + } + encodedKey := base64.StdEncoding.EncodeToString(originalKey) + + gcs := &GCS{ + Config: &config.GCSConfig{ + EncryptionKey: encodedKey, + }, + } + + err := gcs.validateAndDecodeEncryptionKey() + require.NoError(t, err) + assert.Equal(t, originalKey, gcs.encryptionKey) + }) +} + +// validateAndDecodeEncryptionKey is a helper that extracts the key validation +// logic for testing without needing a full GCS connection +func (gcs *GCS) validateAndDecodeEncryptionKey() error { + if gcs.Config.EncryptionKey == "" { + return nil + } + + key, err := base64.StdEncoding.DecodeString(gcs.Config.EncryptionKey) + if err != nil { + return fmt.Errorf("gcs: malformed encryption_key, must be base64-encoded 256-bit key: %w", err) + } + if len(key) != 32 { + return fmt.Errorf("gcs: malformed encryption_key, must be base64-encoded 256-bit key (got %d bytes)", len(key)) + } + gcs.encryptionKey = key + return nil +} diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index f26a22f7..ddde8f34 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -118,8 +118,8 @@ services: GCS_DEBUG: "${GCS_DEBUG:-false}" FTP_DEBUG: "${FTP_DEBUG:-false}" SFTP_DEBUG: "${SFTP_DEBUG:-false}" - AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}" COS_DEBUG: "${COS_DEBUG:-false}" + AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}" CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}" GOCOVERDIR: "/tmp/_coverage_/" # FIPS @@ -131,15 +131,17 @@ services: QA_GCS_OVER_S3_ACCESS_KEY: "${QA_GCS_OVER_S3_ACCESS_KEY}" QA_GCS_OVER_S3_SECRET_KEY: "${QA_GCS_OVER_S3_SECRET_KEY}" QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" - # AlibabaCloud over S3 +# AlibabaCloud over S3 QA_ALIBABA_ACCESS_KEY: "${QA_ALIBABA_ACCESS_KEY:-}" QA_ALIBABA_SECRET_KEY: "${QA_ALIBABA_SECRET_KEY:-}" - # Tencent Cloud Object Storage +# Tencent Cloud Object Storage QA_TENCENT_SECRET_ID: "${QA_TENCENT_SECRET_ID:-}" QA_TENCENT_SECRET_KEY: "${QA_TENCENT_SECRET_KEY:-}" # https://github.com/Altinity/clickhouse-backup/issues/691: AWS_ACCESS_KEY_ID: access_key AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key +# GCS encryption key + GCS_ENCRYPTION_KEY: "${GCS_ENCRYPTION_KEY:-}" volumes_from: - clickhouse ports: @@ -181,13 +183,15 @@ services: QA_GCS_OVER_S3_ACCESS_KEY: "${QA_GCS_OVER_S3_ACCESS_KEY}" QA_GCS_OVER_S3_SECRET_KEY: "${QA_GCS_OVER_S3_SECRET_KEY}" QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" - # AlibabaCloud over S3 +# AlibabaCloud over S3 QA_ALIBABA_ACCESS_KEY: "${QA_ALIBABA_ACCESS_KEY:-}" QA_ALIBABA_SECRET_KEY: "${QA_ALIBABA_SECRET_KEY:-}" - # Tencent Cloud Object Storage +# Tencent Cloud Object Storage QA_TENCENT_SECRET_ID: "${QA_TENCENT_SECRET_ID:-}" QA_TENCENT_SECRET_KEY: "${QA_TENCENT_SECRET_KEY:-}" - +# GCS encryption key + GCS_ENCRYPTION_KEY: "${GCS_ENCRYPTION_KEY:-}" +# fix failures during try to IMDS initialization inside clickhouse AWS_EC2_METADATA_DISABLED: "true" volumes: # clickhouse-backup related files requires for some tests diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 51cff9d1..5cef8bbc 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -189,10 +189,11 @@ services: # Tencent Cloud Object Storage QA_TENCENT_SECRET_ID: "${QA_TENCENT_SECRET_ID:-}" QA_TENCENT_SECRET_KEY: "${QA_TENCENT_SECRET_KEY:-}" - - # https://github.com/Altinity/clickhouse-backup/issues/691: +# https://github.com/Altinity/clickhouse-backup/issues/691: AWS_ACCESS_KEY_ID: access_key AWS_SECRET_ACCESS_KEY: it_is_my_super_secret_key +# GCS encryption key + GCS_ENCRYPTION_KEY: "${GCS_ENCRYPTION_KEY:-}" volumes_from: - clickhouse ports: @@ -235,13 +236,16 @@ services: QA_GCS_OVER_S3_ACCESS_KEY: "${QA_GCS_OVER_S3_ACCESS_KEY}" QA_GCS_OVER_S3_SECRET_KEY: "${QA_GCS_OVER_S3_SECRET_KEY}" QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" - AWS_EC2_METADATA_DISABLED: "true" - # AlibabaCloud over S3 +# AlibabaCloud over S3 QA_ALIBABA_ACCESS_KEY: "${QA_ALIBABA_ACCESS_KEY:-}" QA_ALIBABA_SECRET_KEY: "${QA_ALIBABA_SECRET_KEY:-}" - # Tencent Cloud Object Storage +# Tencent Cloud Object Storage QA_TENCENT_SECRET_ID: "${QA_TENCENT_SECRET_ID:-}" QA_TENCENT_SECRET_KEY: "${QA_TENCENT_SECRET_KEY:-}" + # GCS encryption key + GCS_ENCRYPTION_KEY: "${GCS_ENCRYPTION_KEY:-}" + # fix failures during try to IMDS initialization inside clickhouse + AWS_EC2_METADATA_DISABLED: "true" # to avoid backward incompatibility ;( # https://t.me/clickhouse_ru/359960 diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 1fecfaa4..7e8e0a8f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -796,10 +796,6 @@ func TestAzure(t *testing.T) { } func TestGCSWithCustomEndpoint(t *testing.T) { - if isTestShouldSkip("GCS_TESTS") { - t.Skip("Skipping GCS_EMULATOR integration tests...") - return - } env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") env.Cleanup(t, r) diff --git a/test/integration/run.sh b/test/integration/run.sh index b69fad27..ea96a022 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -20,6 +20,9 @@ export CLICKHOUSE_BACKUP_BIN export LOG_LEVEL=${LOG_LEVEL:-info} export TEST_LOG_LEVEL=${TEST_LOG_LEVEL:-info} +GCS_ENCRYPTION_KEY=$(openssl rand -base64 32) +export GCS_ENCRYPTION_KEY + if [[ -f "${CUR_DIR}/credentials.json" ]]; then export GCS_TESTS=${GCS_TESTS:-1} else diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index 4154e475..4992f5c9 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -156,19 +156,40 @@ services: - minio ftp_server: - image: stilliard/pure-ftpd + image: gists/pure-ftpd:latest hostname: ftp_server environment: - FTP_USER_NAME=test - FTP_USER_PASS=test - - FTP_USER_HOME=/home/test + - FTP_USER_HOME=/home/ftpuser/test - PUBLICHOST=ftp_server + - MIN_PASV_PORT=30000 + - MAX_PASV_PORT=31000 + + entrypoint: ["/bin/sh", "-c"] + command: + - | + mkdir -p /etc/pureftpd + mkdir -p "$$FTP_USER_HOME" + chown -R 1000:1000 /home/ftpuser + + touch /etc/pureftpd/pureftpd.passwd + + printf '%s\n%s\n' "$$FTP_USER_PASS" "$$FTP_USER_PASS" | pure-pw useradd "$$FTP_USER_NAME" -u 1000 -g 1000 -d "$$FTP_USER_HOME" -f /etc/pureftpd/pureftpd.passwd + + pure-pw mkdb /etc/pureftpd/pureftpd.pdb -f /etc/pureftpd/pureftpd.passwd + + exec /usr/sbin/pure-ftpd \ + -l puredb:/etc/pureftpd/pureftpd.pdb \ + -E -j -R \ + -P "$$PUBLICHOST" \ + -p "$$MIN_PASV_PORT:$$MAX_PASV_PORT" healthcheck: - test: echo 1 - interval: 3s - timeout: 2s - retries: 20 - start_period: 10s + test: ["CMD-SHELL", "nc -z localhost 21"] + interval: 10s + timeout: 3s + retries: 10 + start_period: 5s sftp_server: image: panubo/sshd:latest diff --git a/test/testflows/clickhouse_backup/docker-compose/kafka-service.yml b/test/testflows/clickhouse_backup/docker-compose/kafka-service.yml index 1ca984af..b32b8aa3 100644 --- a/test/testflows/clickhouse_backup/docker-compose/kafka-service.yml +++ b/test/testflows/clickhouse_backup/docker-compose/kafka-service.yml @@ -1,6 +1,6 @@ services: kafka: - image: confluentinc/cp-kafka:5.2.0 + image: confluentinc/cp-kafka:7.7.7 expose: - "9092" environment: diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index aadcc4af..c1941b6a 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,4 +1,4 @@ -default_config = r"""'[\'general:\', \' remote_storage: none\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' allow_object_disk_streaming: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' restore_table_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 5s\', \' retries_jitter: 0\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' rbac_backup_always: true\', \' rbac_conflict_resolution: recreate\', \' config_backup_always: false\', \' named_collections_backup_always: false\', \' retriesduration: 5s\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' skip_disks: []\', \' skip_disk_types: []\', \' timeout: 30m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' restore_distributed_cluster: ""\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' default_replica_path: /clickhouse/tables/{cluster}/{shard}/{database}/{table}\', " default_replica_name: \'{replica}\'", \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' retry_mode: standard\', \' chunk_size: 5242880\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' sa_email: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \' chunk_size: 16777216\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' allow_multipart_download: false\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \' watch_is_main_process: false\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' assume_container_exists: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" +default_config = r"""'[\'general:\', \' remote_storage: none\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' allow_object_disk_streaming: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' restore_table_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 5s\', \' retries_jitter: 0\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' rbac_backup_always: true\', \' rbac_conflict_resolution: recreate\', \' config_backup_always: false\', \' named_collections_backup_always: false\', \' retriesduration: 5s\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' skip_disks: []\', \' skip_disk_types: []\', \' timeout: 30m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' restore_distributed_cluster: ""\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' default_replica_path: /clickhouse/tables/{cluster}/{shard}/{database}/{table}\', " default_replica_name: \'{replica}\'", \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' check_sum_algorithm: ""\', \' retry_mode: standard\', \' chunk_size: 5242880\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' sa_email: ""\', \' embedded_access_key: ""\', \' embedded_secret_key: ""\', \' skip_credentials: false\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \' chunk_size: 16777216\', \' encryption_key: ""\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' allow_multipart_download: false\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \' watch_is_main_process: false\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' assume_container_exists: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n clean_local_broken Remove all broken local backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --environment-override value, --env value override any environment variable via CLI parameter\n --help, -h show help\n --version, -v print the version'"""