diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 9e731276..b4224a05 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -260,6 +260,8 @@ jobs:
           # don't change it to avoid broken CI/CD!!!
           # RUN_TESTS: "TestFIPS"
           # LOG_LEVEL: "debug"
+          # SFTP_DEBUG: "true"
+          # AZBLOB_DEBUG: "true"
           # FTP_DEBUG: "true"
           # S3_DEBUG: "true"
           CGO_ENABLED: 0
diff --git a/ChangeLog.md b/ChangeLog.md
index a571fdb6..c7c824df 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -4,7 +4,9 @@ IMPROVEMENTS
 BUG FIXES
 - force set `RefCount` to 0 during `restore` for parts in S3/GCS over S3/Azure disks, for properly works DROP TABLE / DROP DATABASE
-- ignore frozen-metadata during, create, upload, download, restore command, fix [826](https://github.com/Altinity/clickhouse-backup/issues/826)
+- use `os.Link` instead of `os.Rename` for ClickHouse 21.4+, to properly create backups of object disks
+- ignore `frozen_metadata` during the `create`, `upload`, `download` and `restore` commands, fix [826](https://github.com/Altinity/clickhouse-backup/issues/826)
+- `allow_parallel: true` doesn't work after executing the `list` command, fix [827](https://github.com/Altinity/clickhouse-backup/issues/827)

 # v2.4.28
 IMPROVEMENT
diff --git a/pkg/backup/create.go b/pkg/backup/create.go
index 83649f62..ddf4c52d 100644
--- a/pkg/backup/create.go
+++ b/pkg/backup/create.go
@@ -605,7 +605,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku
 		return nil, nil, err
 	}
 	// If partitionsIdsMap is not empty, only parts in this partition will back up.
-	parts, size, err := filesystemhelper.MoveShadow(shadowPath, backupShadowPath, partitionsIdsMap)
+	parts, size, err := filesystemhelper.MoveShadow(shadowPath, backupShadowPath, partitionsIdsMap, version)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -665,7 +665,8 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName, backup
 		if fInfo.IsDir() {
 			return nil
 		}
-		if fInfo.Name() == "frozen_metadata.txt" {
+		// fix https://github.com/Altinity/clickhouse-backup/issues/826
+		if strings.Contains(fInfo.Name(), "frozen_metadata") {
 			return nil
 		}
 		objPartFileMeta, err := object_disk.ReadMetadataFromFile(fPath)
diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go
index 30024de4..1e5b478d 100644
--- a/pkg/backup/restore.go
+++ b/pkg/backup/restore.go
@@ -890,19 +890,22 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
 		if fInfo.IsDir() {
 			return nil
 		}
-		if fInfo.Name() == "frozen_metadata.txt" {
+		// fix https://github.com/Altinity/clickhouse-backup/issues/826
+		if strings.Contains(fInfo.Name(), "frozen_metadata") {
 			return nil
 		}
 		objMeta, err := object_disk.ReadMetadataFromFile(fPath)
 		if err != nil {
 			return err
 		}
-		if objMeta.StorageObjectCount < 1 && objMeta.Version != object_disk.VersionRelativePath {
+		if objMeta.StorageObjectCount < 1 && objMeta.Version < object_disk.VersionRelativePath {
 			return fmt.Errorf("%s: invalid object_disk.Metadata: %#v", fPath, objMeta)
 		}
-		//to allow delete Object Disk Data after DROP TABLE/DATABASE ...SYNC
-		if objMeta.RefCount > 0 {
+		//to allow deleting Object Disk Data during DROP TABLE/DATABASE ...SYNC
+		if objMeta.RefCount > 0 || objMeta.ReadOnly {
 			objMeta.RefCount = 0
+			objMeta.ReadOnly = false
+			log.Debugf("%s %#v set RefCount=0 and ReadOnly=0", fPath, objMeta.StorageObjects)
 			if writeMetaErr := object_disk.WriteMetadataToFile(objMeta, fPath); writeMetaErr != nil {
 				return fmt.Errorf("%s: object_disk.WriteMetadataToFile return error: %v", fPath, writeMetaErr)
 			}
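A note on the `RefCount`/`ReadOnly` reset in the `restore.go` hunk above: ClickHouse keeps a small plain-text metadata file on the local disk for every file stored on an object disk, and restore rewrites those files in place. Below is a minimal reader sketch for that layout (field order per ClickHouse's `DiskObjectStorageMetadata`, simplified; this is illustrative, not the project's actual `object_disk` parser, and the file name is hypothetical):

```go
// Simplified sketch of an object-disk metadata reader, not the project's parser.
// Assumed layout: version, "<object_count> <total_size>", one
// "<object_size> <remote_relative_path>" pair per object, ref_count, read_only.
package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("data.bin") // hypothetical metadata file on the local object-disk path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := bufio.NewReader(f)
	var version, objectCount int
	var totalSize uint64
	fmt.Fscan(r, &version, &objectCount, &totalSize)

	for i := 0; i < objectCount; i++ {
		var objectSize uint64
		var remotePath string
		fmt.Fscan(r, &objectSize, &remotePath)
		fmt.Println("remote object:", remotePath, objectSize)
	}

	var refCount uint64
	var readOnly int
	fmt.Fscan(r, &refCount, &readOnly)
	// Restored verbatim from a backup, refCount > 0 or readOnly == 1 would block
	// DROP TABLE/DATABASE ... SYNC from deleting the remote blobs, hence the reset above.
	fmt.Println("refCount:", refCount, "readOnly:", readOnly == 1)
}
```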
diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go
index c7980fb4..75ffcd98 100644
--- a/pkg/filesystemhelper/filesystemhelper.go
+++ b/pkg/filesystemhelper/filesystemhelper.go
@@ -160,6 +160,10 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM
 		if err != nil {
 			return err
 		}
+		// fix https://github.com/Altinity/clickhouse-backup/issues/826
+		if strings.Contains(info.Name(), "frozen_metadata") {
+			return nil
+		}
 		filename := strings.Trim(strings.TrimPrefix(filePath, srcPartPath), "/")
 		dstFilePath := filepath.Join(dstPartPath, filename)
 		if info.IsDir() {
@@ -207,11 +211,16 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa
 	return ok
 }

-func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap) ([]metadata.Part, int64, error) {
+func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, version int) ([]metadata.Part, int64, error) {
 	log := apexLog.WithField("logger", "MoveShadow")
 	size := int64(0)
 	parts := make([]metadata.Part, 0)
 	err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error {
+		// fix https://github.com/Altinity/clickhouse-backup/issues/826
+		if strings.Contains(info.Name(), "frozen_metadata") {
+			return nil
+		}
+
 		// possible relative path
 		// store / 1f9 / 1f9dc899-0de9-41f8-b95c-26c1f0d67d93 / 20181023_2_2_0 / checksums.txt
 		// store / 1f9 / 1f9dc899-0de9-41f8-b95c-26c1f0d67d93 / 20181023_2_2_0 / x.proj / checksums.txt
@@ -239,7 +248,11 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E
 			return nil
 		}
 		size += info.Size()
-		return os.Rename(filePath, dstFilePath)
+		if version < 21004000 {
+			return os.Rename(filePath, dstFilePath)
+		} else {
+			return os.Link(filePath, dstFilePath)
+		}
 	})
 	return parts, size, err
 }
diff --git a/pkg/status/status.go b/pkg/status/status.go
index 7acb6f59..6a8b0c6e 100644
--- a/pkg/status/status.go
+++ b/pkg/status/status.go
@@ -72,16 +72,19 @@ func (status *AsyncStatus) CheckCommandInProgress(command string) bool {
 	return false
 }

+// InProgress returns true if any command has .Status == InProgressStatus, https://github.com/Altinity/clickhouse-backup/issues/827
 func (status *AsyncStatus) InProgress() bool {
 	status.RLock()
 	defer status.RUnlock()
-	n := len(status.commands) - 1
-	if n < 0 {
-		status.log.Debugf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands))
-		return false
+	for n := range status.commands {
+		if status.commands[n].Status == InProgressStatus {
+			status.log.Debugf("api.status.inProgress -> status.commands[%d].Status == %s, inProgress=%v", n, status.commands[n].Status, status.commands[n].Status == InProgressStatus)
+			return true
+		}
 	}
-	status.log.Debugf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus)
-	return status.commands[n].Status == InProgressStatus
+
+	status.log.Debugf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands))
+	return false
 }

 func (status *AsyncStatus) GetContextWithCancel(commandId int) (context.Context, context.CancelFunc, error) {
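Why the `InProgress` rewrite above fixes [827](https://github.com/Altinity/clickhouse-backup/issues/827): the old implementation inspected only the newest entry of `status.commands`, so any command that finished later (for example a quick `list`) made `InProgress()` report `false` while an earlier command was still running. A self-contained illustration with simplified types (not the project's actual `AsyncStatus`):

```go
// Minimal illustration of the InProgress change, with simplified types.
package main

import "fmt"

const InProgressStatus = "in progress"

type command struct {
	Name   string
	Status string
}

// old behavior: only the most recent command is considered
func inProgressLast(cmds []command) bool {
	if len(cmds) == 0 {
		return false
	}
	return cmds[len(cmds)-1].Status == InProgressStatus
}

// fixed behavior: any in-progress command counts
func inProgressAny(cmds []command) bool {
	for _, c := range cmds {
		if c.Status == InProgressStatus {
			return true
		}
	}
	return false
}

func main() {
	cmds := []command{
		{Name: "create_remote", Status: InProgressStatus}, // still running
		{Name: "list", Status: "success"},                 // finished afterwards
	}
	fmt.Println(inProgressLast(cmds)) // false: the running create_remote is masked
	fmt.Println(inProgressAny(cmds))  // true: correctly reports work in progress
}
```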
diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh
index 5fa048ae..303cf29f 100644
--- a/test/integration/dynamic_settings.sh
+++ b/test/integration/dynamic_settings.sh
@@ -237,11 +237,27 @@ cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml
 EOT

-cat <<EOT > /etc/clickhouse-server/config.d/zero_copy_replication.xml
-<yandex>
-  <merge_tree>
-    <allow_remote_fs_zero_copy_replication>1</allow_remote_fs_zero_copy_replication>
-  </merge_tree>
-</yandex>
+# zero-copy replication is buggy
+#cat <<EOT > /etc/clickhouse-server/config.d/zero_copy_replication.xml
+#<yandex>
+#  <merge_tree>
+#    <allow_remote_fs_zero_copy_replication>1</allow_remote_fs_zero_copy_replication>
+#  </merge_tree>
+#</yandex>
+#EOT
+
+cat <<EOT > /etc/clickhouse-server/config.d/zookeeper_log.xml
+<yandex>
+  <zookeeper_log>
+    <database>system</database>
+    <table>zookeeper_log</table>
+    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    <max_size_rows>1048576</max_size_rows>
+    <reserved_size_rows>8192</reserved_size_rows>
+    <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+    <max_entries>1000</max_entries>
+    <flush_on_crash>true</flush_on_crash>
+  </zookeeper_log>
+</yandex>
 EOT

 fi
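The `dynamic_settings.sh` change above disables the buggy zero-copy replication config and enables the `system.zookeeper_log` table instead, so tests can inspect ZooKeeper traffic when replication misbehaves. A hypothetical spot-check of that table, assuming `github.com/ClickHouse/clickhouse-go/v2` and a server on `localhost:9000` (not part of this diff):

```go
// zookeeper_log_check.go: hypothetical helper, not part of the test suite.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"localhost:9000"}})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// system.zookeeper_log only exists once the config above is loaded
	// and the server has flushed at least one batch of ZooKeeper operations.
	var cnt uint64
	if err := conn.QueryRow(context.Background(),
		"SELECT count() FROM system.zookeeper_log").Scan(&cnt); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("system.zookeeper_log rows: %d\n", cnt)
}
```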
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 12680980..2dd7ecdb 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -3,7 +3,9 @@
 package main

 import (
+	"bufio"
 	"context"
+	"encoding/json"
 	"fmt"
 	"github.com/Altinity/clickhouse-backup/v2/pkg/config"
 	"github.com/Altinity/clickhouse-backup/v2/pkg/logcli"
@@ -442,6 +444,7 @@ func TestS3NoDeletePermission(t *testing.T) {
 	r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml"))
 	r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup"))
 	r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote"))
+	checkObjectStorageIsEmpty(r, "S3")
 }

 // TestDoRestoreRBAC need clickhouse-server restart, no parallel
@@ -808,6 +811,29 @@ func testAPIBackupDelete(r *require.Assertions) {
 	out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
 	r.NoError(err)
 	r.Contains(out, "clickhouse_backup_last_delete_status 1")
+
+	out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XGET 'http://localhost:7171/backup/list'"))
+	log.Infof(out)
+	r.NoError(err)
+	scanner := bufio.NewScanner(strings.NewReader(out))
+	for scanner.Scan() {
+		type backupJSON struct {
+			Name           string `json:"name"`
+			Created        string `json:"created"`
+			Size           uint64 `json:"size,omitempty"`
+			Location       string `json:"location"`
+			RequiredBackup string `json:"required"`
+			Desc           string `json:"desc"`
+		}
+		listItem := backupJSON{}
+		r.NoError(json.Unmarshal(scanner.Bytes(), &listItem))
+		out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/%s/%s'", listItem.Location, listItem.Name))
+		log.Infof(out)
+		r.NoError(err)
+	}
+
+	r.NoError(scanner.Err())
+
 }

 func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) {
@@ -1122,15 +1148,16 @@ func TestTablePatterns(t *testing.T) {
 		if createPattern {
 			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName))
 		} else {
-			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", testBackupName))
+			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName))
 		}
-		r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName))
+		r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", testBackupName))
 		dropDatabasesFromTestDataDataSet(t, r, ch, databaseList)
+
 		if restorePattern {
-			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName))
+			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName))
 		} else {
-			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", testBackupName))
+			r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", testBackupName))
 		}

 		restored := uint64(0)
@@ -1157,6 +1184,7 @@ func TestTablePatterns(t *testing.T) {
 		}
 	}
+	checkObjectStorageIsEmpty(r, "S3")
 }

 func TestProjections(t *testing.T) {
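The `testAPIBackupDelete` additions above rely on `/backup/list` returning one JSON object per line, then call `POST /backup/delete/{location}/{name}` for each entry. A standalone sketch of that parsing pattern; the sample lines below are fabricated for illustration, not captured from a real server:

```go
// Standalone sketch of parsing JSON-lines output from /backup/list.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

type backupJSON struct {
	Name     string `json:"name"`
	Location string `json:"location"`
}

func main() {
	// fabricated example output; the real API returns more fields per line
	out := "{\"name\":\"backup_1\",\"location\":\"local\"}\n" +
		"{\"name\":\"backup_1\",\"location\":\"remote\"}"
	sc := bufio.NewScanner(strings.NewReader(out))
	for sc.Scan() {
		var item backupJSON
		if err := json.Unmarshal(sc.Bytes(), &item); err != nil {
			panic(err)
		}
		// one delete call per listed backup, mirroring the test above
		fmt.Printf("POST http://localhost:7171/backup/delete/%s/%s\n", item.Location, item.Name)
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}
```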
@@ -1272,6 +1300,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) {
 	r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name())))
 	r.Equal(uint64(200), res)
 	fullCleanup(t, r, ch, backupNames, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml")
+	checkObjectStorageIsEmpty(r, "S3")
 }

 func TestSyncReplicaTimeout(t *testing.T) {
@@ -1651,6 +1680,7 @@ func TestIntegrationS3Glacier(t *testing.T) {

 func TestIntegrationS3(t *testing.T) {
 	//t.Parallel()
+	checkObjectStorageIsEmpty(require.New(t), "S3")
 	runMainIntegrationScenario(t, "S3", "config-s3.yml")
 }

@@ -2022,10 +2052,10 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st
 		fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true, backupConfig)
 	}
 	replaceStorageDiskNameForReBalance(r, ch, remoteStorageType, true)
-	checkObjectDiskBackupIsEmpty(r, remoteStorageType)
+	checkObjectStorageIsEmpty(r, remoteStorageType)
 }

-func checkObjectDiskBackupIsEmpty(r *require.Assertions, remoteStorageType string) {
+func checkObjectStorageIsEmpty(r *require.Assertions, remoteStorageType string) {
 	if remoteStorageType == "AZBLOB" {
 		r.NoError(dockerExec("azure", "apk", "add", "jq"))
 		checkBlobCollection := func(containerName string, expected string) {
@@ -2351,17 +2381,6 @@ func generateIncrementTestData(t *testing.T, ch *TestClickHouse, r *require.Asse
 }

 func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestClickHouse, databaseList []string) {
-	ch.queryWithNoError(r, "SYSTEM RESTART REPLICAS")
-	type readOnlyTable struct {
-		Database   string `ch:"database"`
-		Table      string `ch:"table"`
-		IsReadOnly string `ch:"is_readonly"`
-	}
-	allReadonly := make([]readOnlyTable, 0)
-	r.NoError(ch.chbackend.StructSelect(&allReadonly, "SELECT database,table,is_readonly FROM system.replicas WHERE is_readonly"))
-	t.Logf("Current ReadOnly replicas %#v", allReadonly)
-
-	r.Equal(0, len(allReadonly))
 	log.Info("Drop all databases")
 	for _, db := range databaseList {
 		r.NoError(ch.dropDatabase(db + "_" + t.Name()))
diff --git a/test/integration/run.sh b/test/integration/run.sh
index 8da81155..6288336c 100755
--- a/test/integration/run.sh
+++ b/test/integration/run.sh
@@ -27,11 +27,12 @@ export GLACIER_TESTS=${GLACIER_TESTS:-0}

 export AZURE_TESTS=${AZURE_TESTS:-1}
 export RUN_ADVANCED_TESTS=${RUN_ADVANCED_TESTS:-1}
+export GODEBUG=${GODEBUG:-}
 export S3_DEBUG=${S3_DEBUG:-false}
 export GCS_DEBUG=${GCS_DEBUG:-false}
 export FTP_DEBUG=${FTP_DEBUG:-false}
 export SFTP_DEBUG=${SFTP_DEBUG:-false}
-export GODEBUG=${GODEBUG:-}
+export AZBLOB_DEBUG=${AZBLOB_DEBUG:-false}
 export CLICKHOUSE_DEBUG=${CLICKHOUSE_DEBUG:-false}

 if [[ "${CLICKHOUSE_VERSION}" == 2* || "${CLICKHOUSE_VERSION}" == "head" ]]; then
@@ -47,4 +48,4 @@
 docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} up -d
 docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} exec minio mc alias list
 go test -parallel ${RUN_PARALLEL:-$(nproc)} -timeout ${TESTS_TIMEOUT:-30m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go
-go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out"
\ No newline at end of file
+go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out"
diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot
b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index fbce817b..1ea22630 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,4 +1,4 @@ -default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' 
complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" +default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' request_payer: ""\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' bucket: ""\', \' 
path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 4h\', \' debug: false\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'""" help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'"""