diff --git a/ChangeLog.md b/ChangeLog.md index a46c1508..05818195 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -2,6 +2,8 @@ IMPROVEMENTS +- Add option `--skip-projections` to `create`, `upload`, `restore` commands, with table pattern to allow make backup + without projection fix [861](https://github.com/Altinity/clickhouse-backup/issues/861) - remove `S3_PART_SIZE` and `AZBLOB_BUFFER_SIZE` parameter from configuration and significant decrease memory usage during upload and download, fix [854](https://github.com/Altinity/clickhouse-backup/issues/854) - add `--configs-only` and `--rbac-only` options to `upload` and `download` command, diff --git a/LICENSE b/LICENSE index bad51fb6..42f21435 100644 --- a/LICENSE +++ b/LICENSE @@ -2,7 +2,7 @@ MIT License Copyright (c) 2018-2019 Alexander Akulov Copyright (c) 2020-2099 Altinity Inc. -Mantained by Eugene Klimov +Maintained by Eugene Klimov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Manual.md b/Manual.md index 00b16c7b..47b94667 100644 --- a/Manual.md +++ b/Manual.md @@ -43,6 +43,7 @@ Look at the system.parts partition and partition_id fields for details https://c --rbac-only Backup RBAC related objects only, will skip backup data, will backup schema only if --schema added --configs-only Backup 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added --skip-check-parts-columns Skip check system.parts_columns to allow backup inconsistent column types for data parts + --skip-projections Skip make hardlinks to *.proj/* files during backup creation --resume use_embedded_backup_restore: true, --resumable use_embedded_backup_restore: true Will resume upload for object disk data, hard links on local disk still continue to recreate, not work when use_embedded_backup_restore: true ``` @@ -77,6 +78,7 @@ Look at the system.parts partition and partition_id fields for details https://c --configs-only Backup 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added --resume, --resumable Save intermediate upload state and resume upload if backup exists on remote storage, ignore when 'remote_storage: custom' or 'use_embedded_backup_restore: true' --skip-check-parts-columns Skip check system.parts_columns to allow backup inconsistent column types for data parts + --skip-projections Skip make and upload hardlinks to *.proj/* files during backup creation --delete, --delete-source, --delete-local explicitly delete local backup during upload ``` @@ -104,6 +106,7 @@ Look at the system.parts partition and partition_id fields for details https://c --schema, -s Upload schemas only --rbac-only, --rbac Upload RBAC related objects only, will skip upload data, will backup schema only if --schema added --configs-only, --configs Upload 'clickhouse-server' configuration files only, will skip upload data, will backup schema only if --schema added + --skip-projections Skip make and upload hardlinks to *.proj/* files during backup creation --resume, --resumable Save intermediate upload state and resume upload if backup exists on remote storage, ignored with 'remote_storage: custom' or 'use_embedded_backup_restore: true' --delete, --delete-source, --delete-local explicitly delete local backup during upload @@ -175,6 +178,7 @@ Look at the system.parts partition and partition_id fields for details https://c --configs, --restore-configs, 
--do-restore-configs Restore 'clickhouse-server' CONFIG related files --rbac-only Restore RBAC related objects only, will skip backup data, will backup schema only if --schema added --configs-only Restore 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added + --skip-projections Skip make hardlinks to *.proj/* files during backup restoring --resume, --resumable Will resume download for object disk data ``` @@ -207,6 +211,7 @@ Look at the system.parts partition and partition_id fields for details https://c --configs, --restore-configs, --do-restore-configs Download and Restore 'clickhouse-server' CONFIG related files --rbac-only Restore RBAC related objects only, will skip backup data, will backup schema only if --schema added --configs-only Restore 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added + --skip-projections Skip make hardlinks to *.proj/* files during backup restoring --resume, --resumable Save intermediate download state and resume download if backup exists on remote storage, ignored with 'remote_storage: custom' or 'use_embedded_backup_restore: true' ``` @@ -304,6 +309,7 @@ Look at the system.parts partition and partition_id fields for details https://c --rbac, --backup-rbac, --do-backup-rbac Backup RBAC related objects only --configs, --backup-configs, --do-backup-configs Backup `clickhouse-server' configuration files only --skip-check-parts-columns Skip check system.parts_columns to allow backup inconsistent column types for data parts + --skip-projections Skip make and upload hardlinks to *.proj/* files during backup creation ``` ### CLI command - server diff --git a/ReadMe.md b/ReadMe.md index e100e7ee..e6552f46 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -605,6 +605,7 @@ Look at the system.parts partition and partition_id fields for details https://c --rbac-only Backup RBAC related objects only, will skip backup data, will backup schema only if --schema added --configs-only Backup 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added --skip-check-parts-columns Skip check system.parts_columns to allow backup inconsistent column types for data parts + --skip-projections Skip make hardlinks to *.proj/* files during backup creation --resume use_embedded_backup_restore: true, --resumable use_embedded_backup_restore: true Will resume upload for object disk data, hard links on local disk still continue to recreate, not work when use_embedded_backup_restore: true ``` @@ -639,6 +640,7 @@ Look at the system.parts partition and partition_id fields for details https://c --configs-only Backup 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added --resume, --resumable Save intermediate upload state and resume upload if backup exists on remote storage, ignore when 'remote_storage: custom' or 'use_embedded_backup_restore: true' --skip-check-parts-columns Skip check system.parts_columns to allow backup inconsistent column types for data parts + --skip-projections Skip make and upload hardlinks to *.proj/* files during backup creation --delete, --delete-source, --delete-local explicitly delete local backup during upload ``` @@ -666,6 +668,7 @@ Look at the system.parts partition and partition_id fields for details https://c --schema, -s Upload schemas only --rbac-only, --rbac Upload RBAC related objects only, will skip upload data, will backup schema only if 
--schema added --configs-only, --configs Upload 'clickhouse-server' configuration files only, will skip upload data, will backup schema only if --schema added + --skip-projections Skip make and upload hardlinks to *.proj/* files during backup creation --resume, --resumable Save intermediate upload state and resume upload if backup exists on remote storage, ignored with 'remote_storage: custom' or 'use_embedded_backup_restore: true' --delete, --delete-source, --delete-local explicitly delete local backup during upload @@ -737,6 +740,7 @@ Look at the system.parts partition and partition_id fields for details https://c --configs, --restore-configs, --do-restore-configs Restore 'clickhouse-server' CONFIG related files --rbac-only Restore RBAC related objects only, will skip backup data, will backup schema only if --schema added --configs-only Restore 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added + --skip-projections Skip make hardlinks to *.proj/* files during backup restoring --resume, --resumable Will resume download for object disk data ``` @@ -769,6 +773,7 @@ Look at the system.parts partition and partition_id fields for details https://c --configs, --restore-configs, --do-restore-configs Download and Restore 'clickhouse-server' CONFIG related files --rbac-only Restore RBAC related objects only, will skip backup data, will backup schema only if --schema added --configs-only Restore 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added + --skip-projections Skip make hardlinks to *.proj/* files during backup restoring --resume, --resumable Save intermediate download state and resume download if backup exists on remote storage, ignored with 'remote_storage: custom' or 'use_embedded_backup_restore: true' ``` @@ -866,6 +871,7 @@ Look at the system.parts partition and partition_id fields for details https://c --rbac, --backup-rbac, --do-backup-rbac Backup RBAC related objects only --configs, --backup-configs, --do-backup-configs Backup `clickhouse-server' configuration files only --skip-check-parts-columns Skip check system.parts_columns to allow backup inconsistent column types for data parts + --skip-projections Skip make and upload hardlinks to *.proj/* files during backup creation ``` ### CLI command - server diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 4994eb7e..220f46ca 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -115,7 +115,7 @@ func main() { Description: "Create new backup", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateBackup(c.Args().First(), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), c.Bool("resume"), version, c.Int("command-id")) + return b.CreateBackup(c.Args().First(), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), c.StringSlice("skip-projections"), c.Bool("resume"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -169,6 +169,11 @@ func main() { Hidden: false, Usage: "Skip check system.parts_columns to allow backup inconsistent column types for data parts", }, + cli.StringSliceFlag{ + 
Name: "skip-projections", + Hidden: false, + Usage: "Skip make hardlinks to *.proj/* files during backup creation, format `db_pattern.table_pattern:projections_pattern`, use https://pkg.go.dev/path/filepath#Match syntax", + }, cli.BoolFlag{ Name: "resume, resumable", Hidden: false, @@ -183,7 +188,7 @@ func main() { Description: "Create and upload", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateToRemote(c.Args().First(), c.Bool("delete-source"), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateToRemote(c.Args().First(), c.Bool("delete-source"), c.String("diff-from"), c.String("diff-from-remote"), c.String("tables"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("skip-check-parts-columns"), c.Bool("schema"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -247,6 +252,11 @@ func main() { Hidden: false, Usage: "Skip check system.parts_columns to allow backup inconsistent column types for data parts", }, + cli.StringSliceFlag{ + Name: "skip-projections", + Hidden: false, + Usage: "Skip make and upload hardlinks to *.proj/* files during backup creation, format `db_pattern.table_pattern:projections_pattern`, use https://pkg.go.dev/path/filepath#Match syntax", + }, cli.BoolFlag{ Name: "delete, delete-source, delete-local", Hidden: false, @@ -260,7 +270,7 @@ func main() { UsageText: "clickhouse-backup upload [-t, --tables=.] [--partitions=] [-s, --schema] [--diff-from=] [--diff-from-remote=] [--resumable] ", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.Upload(c.Args().First(), c.Bool("delete-source"), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("schema"), c.Bool("rbac-only"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) + return b.Upload(c.Args().First(), c.Bool("delete-source"), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("schema"), c.Bool("rbac-only"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -304,6 +314,11 @@ func main() { Hidden: false, Usage: "Upload 'clickhouse-server' configuration files only, will skip upload data, will backup schema only if --schema added", }, + cli.StringSliceFlag{ + Name: "skip-projections", + Hidden: false, + Usage: "Skip make and upload hardlinks to *.proj/* files during backup creation, format `db_pattern.table_pattern:projections_pattern`, use https://pkg.go.dev/path/filepath#Match syntax", + }, cli.BoolFlag{ Name: "resume, resumable", Hidden: false, @@ -380,7 +395,7 @@ func main() { UsageText: "clickhouse-backup restore [-t, --tables=.
] [-m, --restore-database-mapping=:[,<...>]] [--tm, --restore-table-mapping=:[,<...>]] [--partitions=] [-s, --schema] [-d, --data] [--rm, --drop] [-i, --ignore-dependencies] [--rbac] [--configs] [--resume] ", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("restore-table-mapping"), c.StringSlice("partitions"), c.Bool("schema"), c.Bool("data"), c.Bool("drop"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) + return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("restore-table-mapping"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("schema"), c.Bool("data"), c.Bool("drop"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -449,6 +464,11 @@ func main() { Hidden: false, Usage: "Restore 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added", }, + cli.StringSliceFlag{ + Name: "skip-projections", + Hidden: false, + Usage: "Skip make hardlinks to *.proj/* files during backup restoring, format `db_pattern.table_pattern:projections_pattern`, use https://pkg.go.dev/path/filepath#Match syntax", + }, cli.BoolFlag{ Name: "resume, resumable", Hidden: false, @@ -462,7 +482,7 @@ func main() { UsageText: "clickhouse-backup restore_remote [--schema] [--data] [-t, --tables=.
] [-m, --restore-database-mapping=:[,<...>]] [--tm, --restore-table-mapping=:[,<...>]] [--partitions=] [--rm, --drop] [-i, --ignore-dependencies] [--rbac] [--configs] [--skip-rbac] [--skip-configs] [--resumable] ", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.RestoreFromRemote(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("restore-table-mapping"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("d"), c.Bool("rm"), c.Bool("i"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) + return b.RestoreFromRemote(c.Args().First(), c.String("tables"), c.StringSlice("restore-database-mapping"), c.StringSlice("restore-table-mapping"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("schema"), c.Bool("d"), c.Bool("rm"), c.Bool("i"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -531,6 +551,11 @@ func main() { Hidden: false, Usage: "Restore 'clickhouse-server' configuration files only, will skip backup data, will backup schema only if --schema added", }, + cli.StringSliceFlag{ + Name: "skip-projections", + Hidden: false, + Usage: "Skip make hardlinks to *.proj/* files during backup restoring, format `db_pattern.table_pattern:projections_pattern`, use https://pkg.go.dev/path/filepath#Match syntax", + }, cli.BoolFlag{ Name: "resume, resumable", Hidden: false, @@ -598,7 +623,7 @@ func main() { Description: "Execute create_remote + delete local, create full backup every `--full-interval`, create and upload incremental backup every `--watch-interval` use previous backup as base with `--diff-from-remote` option, use `backups_to_keep_remote` config option for properly deletion remote backups, will delete old backups which not have references from other backups", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.Watch(c.String("watch-interval"), c.String("full-interval"), c.String("watch-backup-name-template"), c.String("tables"), c.StringSlice("partitions"), c.Bool("schema"), c.Bool("rbac"), c.Bool("configs"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id"), nil, c) + return b.Watch(c.String("watch-interval"), c.String("full-interval"), c.String("watch-backup-name-template"), c.String("tables"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("schema"), c.Bool("rbac"), c.Bool("configs"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id"), nil, c) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -652,6 +677,11 @@ func main() { Hidden: false, Usage: "Skip check system.parts_columns to allow backup inconsistent column types for data parts", }, + cli.StringSliceFlag{ + Name: "skip-projections", + Hidden: false, + Usage: "Skip make and upload hardlinks to *.proj/* files during backup creation, format `db_pattern.table_pattern:projections_pattern`, use https://pkg.go.dev/path/filepath#Match syntax", + }, ), }, { diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 8283a839..64da89c1 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -57,7 +57,7 @@ func NewBackupName() string { // CreateBackup - create new backup of all tables matched by tablePattern // If backupName is empty string will use default backup name -func (b *Backuper) CreateBackup(backupName, 
diffFromRemote, tablePattern string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns, resume bool, backupVersion string, commandId int) error { +func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, skipProjections []string, resume bool, backupVersion string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -129,7 +129,7 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, if b.cfg.ClickHouse.UseEmbeddedBackupRestore { err = b.createBackupEmbedded(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, backupVersion, tablePattern, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, startBackup, version) } else { - err = b.createBackupLocal(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, rbacOnly, configsOnly, backupVersion, partitions, partitionsIdMap, tables, tablePattern, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, startBackup, version) + err = b.createBackupLocal(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, rbacOnly, configsOnly, backupVersion, partitions, partitionsIdMap, tables, tablePattern, skipProjections, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, startBackup, version) } if err != nil { log.Error().Msgf("backup failed error: %v", err) @@ -193,7 +193,7 @@ func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupNa return backupRBACSize, backupConfigSize, nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRemote string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitions []string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, tablePattern string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, startBackup time.Time, version int) error { +func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRemote string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitions []string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, tablePattern string, skipProjections []string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, startBackup time.Time, version int) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -293,7 +293,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe logger.Debug().Msg("create data") shadowBackupUUID := strings.ReplaceAll(uuid.New().String(), "-", "") var addTableToBackupErr error - disksToPartsMap, realSize, objectDiskSize, addTableToBackupErr = b.AddTableToLocalBackup(createCtx, backupName, tablesDiffFromRemote, shadowBackupUUID, disks, &table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}], version) + disksToPartsMap, 
realSize, objectDiskSize, addTableToBackupErr = b.AddTableToLocalBackup(createCtx, backupName, tablesDiffFromRemote, shadowBackupUUID, disks, &table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}], skipProjections, version) if addTableToBackupErr != nil { logger.Error().Msgf("b.AddTableToLocalBackup error: %v", addTableToBackupErr) return addTableToBackupErr @@ -741,7 +741,7 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st return rbacDataSize, nil } -func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, tablesDiffFromRemote map[metadata.TableTitle]metadata.TableMetadata, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsIdsMap common.EmptyMap, version int) (map[string][]metadata.Part, map[string]int64, map[string]int64, error) { +func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, tablesDiffFromRemote map[metadata.TableTitle]metadata.TableMetadata, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsIdsMap common.EmptyMap, skipProjections []string, version int) (map[string][]metadata.Part, map[string]int64, map[string]int64, error) { logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "create", @@ -795,7 +795,7 @@ func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, return nil, nil, nil, err } // If partitionsIdsMap is not empty, only parts in this partition will back up. - parts, size, err := filesystemhelper.MoveShadowToBackup(shadowPath, backupShadowPath, partitionsIdsMap, tablesDiffFromRemote[metadata.TableTitle{Database: table.Database, Table: table.Name}], disk, version) + parts, size, err := filesystemhelper.MoveShadowToBackup(shadowPath, backupShadowPath, partitionsIdsMap, table, tablesDiffFromRemote[metadata.TableTitle{Database: table.Database, Table: table.Name}], disk, skipProjections, version) if err != nil { return nil, nil, nil, err } @@ -865,7 +865,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string, return nil } // fix https://github.com/Altinity/clickhouse-backup/issues/826 - if strings.Contains(fInfo.Name(), "frozen_metadata") { + if strings.Contains(fInfo.Name(), "frozen_metadata.txt") { return nil } var realSize, objSize int64 diff --git a/pkg/backup/create_remote.go b/pkg/backup/create_remote.go index 3bf0c463..0d587fdc 100644 --- a/pkg/backup/create_remote.go +++ b/pkg/backup/create_remote.go @@ -5,7 +5,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/status" ) -func (b *Backuper) CreateToRemote(backupName string, deleteSource bool, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, resume bool, version string, commandId int) error { +func (b *Backuper) CreateToRemote(backupName string, deleteSource bool, diffFrom, diffFromRemote, tablePattern string, partitions, skipProjections []string, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, resume bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -15,10 +15,10 @@ func (b *Backuper) CreateToRemote(backupName string, deleteSource bool, diffFrom if backupName == "" { backupName = NewBackupName() } - if err := b.CreateBackup(backupName, diffFromRemote, tablePattern, partitions, schemaOnly, 
backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, resume, version, commandId); err != nil { + if err := b.CreateBackup(backupName, diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, skipProjections, resume, version, commandId); err != nil { return err } - if err := b.Upload(backupName, deleteSource, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly, rbacOnly, configsOnly, resume, version, commandId); err != nil { + if err := b.Upload(backupName, deleteSource, diffFrom, diffFromRemote, tablePattern, partitions, skipProjections, schemaOnly, rbacOnly, configsOnly, resume, version, commandId); err != nil { return err } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 45883247..76765df1 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -44,7 +44,7 @@ import ( var CreateDatabaseRE = regexp.MustCompile(`(?m)^CREATE DATABASE (\s*)(\S+)(\s*)`) // Restore - restore tables matched by tablePattern from backupName -func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tableMapping, partitions []string, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, backupVersion string, commandId int) error { +func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tableMapping, partitions, skipProjections []string, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, backupVersion string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -227,7 +227,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab } if dataOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) { - if err := b.RestoreData(ctx, backupName, backupMetadata, dataOnly, metadataPath, tablePattern, partitions, disks, version); err != nil { + if err := b.RestoreData(ctx, backupName, backupMetadata, dataOnly, metadataPath, tablePattern, partitions, skipProjections, disks, version); err != nil { return err } } @@ -599,7 +599,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a // search in local user directory if sqlFiles, globErr := filepath.Glob(path.Join(accessPath, "*.sql")); globErr == nil { - existsRBACObjectIds := []string{} + var existsRBACObjectIds []string for _, f := range sqlFiles { sql, readErr := os.ReadFile(f) if readErr != nil { @@ -1274,7 +1274,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci } // RestoreData - restore data for tables matched by tablePattern from backupName -func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk, version int) error { +func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, dataOnly bool, metadataPath, tablePattern string, partitions, skipProjections []string, disks []clickhouse.Disk, version int) error { var err error startRestoreData := time.Now() diskMap := make(map[string]string, len(disks)) @@ -1310,7 +1310,7 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMet if b.isEmbedded { err = b.restoreDataEmbedded(ctx, backupName, dataOnly, version, tablesForRestore, 
partitionsNameList) } else { - err = b.restoreDataRegular(ctx, backupName, backupMetadata, tablePattern, tablesForRestore, diskMap, diskTypes, disks) + err = b.restoreDataRegular(ctx, backupName, backupMetadata, tablePattern, tablesForRestore, diskMap, diskTypes, disks, skipProjections) } if err != nil { return err @@ -1326,7 +1326,7 @@ func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, d return b.restoreEmbedded(ctx, backupName, false, dataOnly, version, tablesForRestore, partitionsNameList) } -func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk) error { +func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, skipProjections []string) error { if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreMapping(tablePattern, "database") } @@ -1383,11 +1383,11 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ba restoreBackupWorkingGroup.Go(func() error { // https://github.com/Altinity/clickhouse-backup/issues/529 if b.cfg.ClickHouse.RestoreAsAttach { - if restoreErr := b.restoreDataRegularByAttach(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, logger); restoreErr != nil { + if restoreErr := b.restoreDataRegularByAttach(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, skipProjections, logger); restoreErr != nil { return restoreErr } } else { - if restoreErr := b.restoreDataRegularByParts(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, logger); restoreErr != nil { + if restoreErr := b.restoreDataRegularByParts(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, skipProjections, logger); restoreErr != nil { return restoreErr } } @@ -1413,8 +1413,8 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ba return nil } -func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger) error { - if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, b.ch, false); err != nil { +func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, skipProjections []string, logger zerolog.Logger) error { + if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, skipProjections, b.ch, false); err != nil { return fmt.Errorf("can't copy data to storage '%s.%s': %v", table.Database, table.Table, err) } logger.Debug().Msg("data to 'storage' copied") @@ -1433,8 +1433,8 @@ func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName st return nil } -func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, 
diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger) error { - if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, b.ch, true); err != nil { +func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, skipProjections []string, logger zerolog.Logger) error { + if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, skipProjections, b.ch, true); err != nil { return fmt.Errorf("can't copy data to detached '%s.%s': %v", table.Database, table.Table, err) } logger.Debug().Msg("data to 'detached' copied") @@ -1529,7 +1529,7 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin return nil } // fix https://github.com/Altinity/clickhouse-backup/issues/826 - if strings.Contains(fInfo.Name(), "frozen_metadata") { + if strings.Contains(fInfo.Name(), "frozen_metadata.txt") { return nil } if b.resume { @@ -1649,6 +1649,9 @@ func (b *Backuper) findObjectDiskPartRecursive(ctx context.Context, backup metad } var requiredTable *metadata.TableMetadata requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, metadata.TableTitle{Database: table.Database, Table: table.Table}) + if err != nil { + return "", "", err + } // @todo think about add check what if disk type could changed (should already restricted, cause upload seek part in the same disk name) for requiredDiskName, parts := range requiredTable.Parts { for _, requiredPart := range parts { diff --git a/pkg/backup/restore_remote.go b/pkg/backup/restore_remote.go index f8a5e0a5..cfb8ce2c 100644 --- a/pkg/backup/restore_remote.go +++ b/pkg/backup/restore_remote.go @@ -2,12 +2,12 @@ package backup import "errors" -func (b *Backuper) RestoreFromRemote(backupName, tablePattern string, databaseMapping, tableMapping, partitions []string, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, version string, commandId int) error { +func (b *Backuper) RestoreFromRemote(backupName, tablePattern string, databaseMapping, tableMapping, partitions, skipProjections []string, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, version string, commandId int) error { if err := b.Download(backupName, tablePattern, partitions, schemaOnly, rbacOnly, configsOnly, resume, version, commandId); err != nil { // https://github.com/Altinity/clickhouse-backup/issues/625 if !errors.Is(err, ErrBackupIsAlreadyExists) { return err } } - return b.Restore(backupName, tablePattern, databaseMapping, tableMapping, partitions, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume, version, commandId) + return b.Restore(backupName, tablePattern, databaseMapping, tableMapping, partitions, skipProjections, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume, version, commandId) } diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 03e644d0..8d22af5c 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -32,7 +32,7 @@ import ( "github.com/yargevad/filepathx" ) -func (b *Backuper) Upload(backupName string, deleteSource 
bool, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, rbacOnly, configsOnly, resume bool, backupVersion string, commandId int) error { +func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFromRemote, tablePattern string, partitions, skipProjections []string, schemaOnly, rbacOnly, configsOnly, resume bool, backupVersion string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -149,7 +149,7 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr //skip upload data for embedded backup with empty embedded_backup_disk if doUploadData && (!b.isEmbedded || b.cfg.ClickHouse.EmbeddedBackupDisk != "") { var files map[string][]string - files, uploadedBytes, uploadTableErr = b.uploadTableData(uploadCtx, backupName, deleteSource, tablesForUpload[idx]) + files, uploadedBytes, uploadTableErr = b.uploadTableData(uploadCtx, backupName, deleteSource, tablesForUpload[idx], skipProjections) if uploadTableErr != nil { return uploadTableErr } @@ -369,7 +369,7 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, } if b.cfg.General.RemoteStorage == "custom" && b.resume { - return fmt.Errorf("Resumable state not allowed for `remote_storage: custom`. Disable it by setting use_resumable_state=false in `general` config section") + return fmt.Errorf("resumable state not allowed for `remote_storage: custom`. Disable it by setting use_resumable_state=false in `general` config section") } if b.cfg.General.RemoteStorage == "s3" && len(b.cfg.S3.CustomStorageClassMap) > 0 { for pattern, storageClass := range b.cfg.S3.CustomStorageClassMap { @@ -482,7 +482,7 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate return uint64(remoteUploaded.Size()), nil } -func (b *Backuper) uploadTableData(ctx context.Context, backupName string, deleteSource bool, table metadata.TableMetadata) (map[string][]string, int64, error) { +func (b *Backuper) uploadTableData(ctx context.Context, backupName string, deleteSource bool, table metadata.TableMetadata, skipProjections []string) (map[string][]string, int64, error) { dbAndTablePath := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) uploadedFiles := map[string][]string{} capacity := 0 @@ -501,7 +501,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet splitPartsCapacity := 0 for disk := range table.Parts { backupPath := b.getLocalBackupDataPathForTable(backupName, disk, dbAndTablePath) - splitPartsList, err := b.splitPartFiles(backupPath, table.Parts[disk]) + splitPartsList, err := b.splitPartFiles(backupPath, table.Parts[disk], table.Database, table.Table, skipProjections) if err != nil { return nil, 0, err } @@ -758,16 +758,17 @@ bodyRead: } } -func (b *Backuper) splitPartFiles(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { +func (b *Backuper) splitPartFiles(basePath string, parts []metadata.Part, database, table string, skipProjections []string) ([]metadata.SplitPartFiles, error) { if b.cfg.General.UploadByPart { - return b.splitFilesByName(basePath, parts) + return b.splitFilesByName(basePath, parts, database, table, skipProjections) } else { - return b.splitFilesBySize(basePath, parts) + return b.splitFilesBySize(basePath, parts, database, table, skipProjections) } } -func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, 
error) { +func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part, database, table string, skipProjections []string) ([]metadata.SplitPartFiles, error) { result := make([]metadata.SplitPartFiles, 0) + for i := range parts { if parts[i].Required { continue @@ -782,6 +783,10 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m return nil } relativePath := strings.TrimPrefix(filePath, basePath) + // https://github.com/Altinity/clickhouse-backup/issues/861 + if filesystemhelper.IsSkipProjections(skipProjections, path.Join(database, table, relativePath)) { + return nil + } files = append(files, relativePath) return nil }) @@ -796,7 +801,7 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m return result, nil } -func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { +func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part, database, table string, skipProjections []string) ([]metadata.SplitPartFiles, error) { var size int64 var files []string maxSize := b.cfg.General.MaxFileSize @@ -814,6 +819,11 @@ func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]m if !info.Mode().IsRegular() { return nil } + relativePath := strings.TrimPrefix(filePath, basePath) + // https://github.com/Altinity/clickhouse-backup/issues/861 + if filesystemhelper.IsSkipProjections(skipProjections, path.Join(database, table, relativePath)) { + return nil + } if (size+info.Size()) > maxSize && len(files) > 0 { result = append(result, metadata.SplitPartFiles{ Prefix: strconv.Itoa(partSuffix), @@ -823,7 +833,6 @@ func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]m size = 0 partSuffix += 1 } - relativePath := strings.TrimPrefix(filePath, basePath) files = append(files, relativePath) size += info.Size() return nil diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index 2560f50e..39b063c8 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -66,7 +66,7 @@ func (b *Backuper) ValidateWatchParams(watchInterval, fullInterval, watchBackupN // // - each watch-interval, run create_remote increment --diff-from=prev-name + delete local increment, even when upload failed // - save previous backup type incremental, next try will also incremental, until reach full interval -func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern string, partitions []string, schemaOnly, backupRBAC, backupConfigs, skipCheckPartsColumns bool, version string, commandId int, metrics metrics.APIMetricsInterface, cliCtx *cli.Context) error { +func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern string, partitions, skipProjections []string, schemaOnly, backupRBAC, backupConfigs, skipCheckPartsColumns bool, version string, commandId int, metrics metrics.APIMetricsInterface, cliCtx *cli.Context) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -122,14 +122,14 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } if metrics != nil { createRemoteErr, createRemoteErrCount = metrics.ExecuteWithMetrics("create_remote", createRemoteErrCount, func() error { - return b.CreateToRemote(backupName, false, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) + return b.CreateToRemote(backupName, false, 
"", diffFromRemote, tablePattern, partitions, skipProjections, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) }) deleteLocalErr, deleteLocalErrCount = metrics.ExecuteWithMetrics("delete", deleteLocalErrCount, func() error { return b.RemoveBackupLocal(ctx, backupName, nil) }) } else { - createRemoteErr = b.CreateToRemote(backupName, false, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) + createRemoteErr = b.CreateToRemote(backupName, false, "", diffFromRemote, tablePattern, partitions, skipProjections, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) if createRemoteErr != nil { cmd := "create_remote" if diffFromRemote != "" { diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index c136120a..47405a37 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -116,7 +116,7 @@ func MkdirAll(path string, ch *clickhouse.ClickHouse, disks []clickhouse.Disk) e } // HardlinkBackupPartsToStorage - copy partitions for specific table to detached folder -func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableMetadata, disks []clickhouse.Disk, diskMap map[string]string, tableDataPaths []string, ch *clickhouse.ClickHouse, toDetached bool) error { +func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableMetadata, disks []clickhouse.Disk, diskMap map[string]string, tableDataPaths, skipProjections []string, ch *clickhouse.ClickHouse, toDetached bool) error { start := time.Now() dstDataPaths := clickhouse.GetDisksByPaths(disks, tableDataPaths) dbAndTableDir := path.Join(common.TablePathEncode(backupTable.Database), common.TablePathEncode(backupTable.Table)) @@ -147,7 +147,7 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM backupDiskName = part.RebalancedDisk dstParentDir, dstParentDirExists = dstDataPaths[part.RebalancedDisk] if !dstParentDirExists { - return fmt.Errorf("dstDataPaths=%#v, not contains %s", dstDataPaths, part.RebalancedDisk) + return fmt.Errorf("dstDataPaths=%#v, not contains rebalanced %s", dstDataPaths, part.RebalancedDisk) } } backupDiskPath := diskMap[backupDiskName] @@ -175,12 +175,20 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM return err } // fix https://github.com/Altinity/clickhouse-backup/issues/826 - if strings.Contains(info.Name(), "frozen_metadata") { + if strings.Contains(info.Name(), "frozen_metadata.txt") { return nil } filename := strings.Trim(strings.TrimPrefix(filePath, srcPartPath), "/") + // https://github.com/Altinity/clickhouse-backup/issues/861 + if IsSkipProjections(skipProjections, path.Join(backupTable.Database, backupTable.Table, part.Name, filename)) { + return nil + } dstFilePath := filepath.Join(dstPartPath, filename) if info.IsDir() { + // https://github.com/Altinity/clickhouse-backup/issues/861 + if IsSkipProjections(skipProjections, path.Join(backupTable.Database, backupTable.Table, part.Name, filename)+"/") { + return nil + } log.Debug().Msgf("MkDir %s", dstFilePath) return Mkdir(dstFilePath, ch, disks) } @@ -248,12 +256,12 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa return false } -func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, tableDiffFromRemote 
metadata.TableMetadata, disk clickhouse.Disk, version int) ([]metadata.Part, int64, error) { +func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, table *clickhouse.Table, tableDiffFromRemote metadata.TableMetadata, disk clickhouse.Disk, skipProjections []string, version int) ([]metadata.Part, int64, error) { size := int64(0) parts := make([]metadata.Part, 0) err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error { // fix https://github.com/Altinity/clickhouse-backup/issues/826 - if strings.Contains(info.Name(), "frozen_metadata") { + if strings.Contains(info.Name(), "frozen_metadata.txt") { return nil } @@ -267,6 +275,12 @@ func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap if len(pathParts) != 4 { return nil } + + // https://github.com/Altinity/clickhouse-backup/issues/861 + if IsSkipProjections(skipProjections, path.Join(table.Database, table.Name, path.Join(pathParts[3:]...))) { + return nil + } + if len(partitionsBackupMap) != 0 && !IsPartInPartition(pathParts[3], partitionsBackupMap) { return nil } @@ -302,6 +316,64 @@ func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap return parts, size, err } +func IsSkipProjections(skipProjections []string, relativePath string) bool { + log.Debug().Msgf("try IsSkipProjections, skipProjections=%v, relativePath=%s", skipProjections, relativePath) + if skipProjections == nil || len(skipProjections) == 0 { + return false + } + + matchPattenFinal := func(dbPattern string, tablePattern string, projectionPattern string, relativePath string) bool { + finalPattern := path.Join(dbPattern, tablePattern, "*", projectionPattern+".proj", "*") + if strings.HasSuffix(relativePath, ".proj") { + finalPattern = path.Join(dbPattern, tablePattern, "*", projectionPattern+".proj") + } + if isMatched, err := filepath.Match(finalPattern, relativePath); isMatched { + return isMatched + } else if err != nil { + log.Warn().Msgf("filepath.Match(%s, %s) return error: %v", finalPattern, relativePath, err) + } else { + log.Debug().Msgf("IsSkipProjections not matched %s->%s", finalPattern, relativePath) + } + return false + } + if strings.Contains(relativePath, ".proj/") || strings.HasSuffix(relativePath, ".proj") { + dbPattern := "*" + tablePattern := "*" + projectionPattern := "*" + isWildCardPattern := true + for _, skipPatterns := range skipProjections { + for _, tableAndProjectionPatterns := range strings.Split(skipPatterns, ",") { + dbPattern = "*" + tablePattern = "*" + projectionPattern = "*" + tableAndProjectionPattern := strings.SplitN(tableAndProjectionPatterns, ":", 2) + if len(tableAndProjectionPattern) == 2 { + projectionPattern = tableAndProjectionPattern[1] + isWildCardPattern = false + } + dbAndTablePattern := strings.SplitN(tableAndProjectionPattern[0], ".", 2) + if len(dbAndTablePattern) == 2 { + dbPattern = dbAndTablePattern[0] + tablePattern = dbAndTablePattern[1] + isWildCardPattern = false + } else { + tablePattern = dbAndTablePattern[0] + isWildCardPattern = false + } + if isMatched := matchPattenFinal(dbPattern, tablePattern, projectionPattern, relativePath); isMatched { + return true + } + } + } + if isWildCardPattern { + if isMatched := matchPattenFinal(dbPattern, tablePattern, projectionPattern, relativePath); isMatched { + return true + } + } + } + return false +} + func addRequiredPartIfNotExists(parts []metadata.Part, relativePath string, tableDiffFromRemote metadata.TableMetadata, disk clickhouse.Disk) 
([]metadata.Part, bool, bool) { isRequiredPartFound := false exists := false diff --git a/pkg/filesystemhelper/filesystemhelper_test.go b/pkg/filesystemhelper/filesystemhelper_test.go new file mode 100644 index 00000000..32bdb5eb --- /dev/null +++ b/pkg/filesystemhelper/filesystemhelper_test.go @@ -0,0 +1,102 @@ +package filesystemhelper + +import ( + "testing" +) + +func TestIsSkipProjections(t *testing.T) { + tests := []struct { + name string + skipProjections []string + relativePath string + expectedResult bool + }{ + { + name: "Not match with nil", + skipProjections: nil, + relativePath: "db/table/part/projection.proj/file", + expectedResult: false, + }, + { + name: "Not match with empty pattern", + skipProjections: []string{}, + relativePath: "db/table/part/projection.proj/file", + expectedResult: false, + }, + { + name: "Match with wildcard pattern", + skipProjections: []string{"*"}, + relativePath: "db/table/part/projection.proj/file", + expectedResult: true, + }, + { + name: "Match with specific db and table", + skipProjections: []string{"db.table:projection"}, + relativePath: "db/table/part/projection.proj/file", + expectedResult: true, + }, + { + name: "No match with specific db and table", + skipProjections: []string{"db.table:projection"}, + relativePath: "db/table/part/other.proj/file", + expectedResult: false, + }, + { + name: "Match with specific table and wildcard projection", + skipProjections: []string{"table:*"}, + relativePath: "db/table/part/projection.proj/file", + expectedResult: true, + }, + { + name: "No match with specific table and wildcard projection", + skipProjections: []string{"table:*"}, + relativePath: "db/othertable/part/projection.proj/file", + expectedResult: false, + }, + { + name: "Match with multiple patterns", + skipProjections: []string{"db.table:projection", "otherdb.othertable:otherprojection"}, + relativePath: "otherdb/othertable/part/otherprojection.proj/file", + expectedResult: true, + }, + { + name: "No match with multiple patterns", + skipProjections: []string{"db.table:projection", "otherdb.othertable:otherprojection"}, + relativePath: "db/table/part/otherprojection.proj/file", + expectedResult: false, + }, + { + name: "Match with multiple wildcard patterns", + skipProjections: []string{"db.table:*", "db2.*:othe?projection,other*.*table:*projection"}, + relativePath: "otherdb/othertable/part/otherprojection.proj/file", + expectedResult: true, + }, + { + name: "No match with multiple wildcard patterns", + skipProjections: []string{"db*.table:?projection", "otherd?.othertab*:otherprojection"}, + relativePath: "db/table/part/otherprojection.proj/file", + expectedResult: false, + }, + { + name: "Match with real file", + skipProjections: []string{"default.*"}, + relativePath: "default/table_with_projection/20250124_2_2_0/x.proj/columns.txt", + expectedResult: true, + }, + { + name: "Match with real dir", + skipProjections: []string{"default.*"}, + relativePath: "default/table_with_projection/20250124_2_2_0/x.proj", + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsSkipProjections(tt.skipProjections, tt.relativePath) + if result != tt.expectedResult { + t.Errorf("IsSkipProjections() = %v, want %v", result, tt.expectedResult) + } + }) + } +} diff --git a/pkg/server/server.go b/pkg/server/server.go index 2e290112..ec10ebbf 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -156,11 +156,7 @@ func (api *APIServer) RunWatch(cliCtx *cli.Context) { log.Info().Msg("Starting API 
Server in watch mode") b := backup.NewBackuper(api.config) commandId, _ := status.Current.Start("watch") - err := b.Watch( - cliCtx.String("watch-interval"), cliCtx.String("full-interval"), cliCtx.String("watch-backup-name-template"), - "*.*", nil, false, false, false, false, - api.clickhouseBackupVersion, commandId, api.GetMetrics(), cliCtx, - ) + err := b.Watch(cliCtx.String("watch-interval"), cliCtx.String("full-interval"), cliCtx.String("watch-backup-name-template"), "*.*", nil, nil, false, false, false, false, api.clickhouseBackupVersion, commandId, api.GetMetrics(), cliCtx) api.handleWatchResponse(commandId, err) } @@ -529,6 +525,7 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti } tablePattern := "" partitionsToBackup := make([]string, 0) + skipProjections := make([]string, 0) schemaOnly := false rbacOnly := false configsOnly := false @@ -590,12 +587,16 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti skipCheckPartsColumns = true fullCommand = fmt.Sprintf("%s --skip-check-parts-columns", fullCommand) } + if matchParam, skipProjectionsFromArgs := simpleParseArg(i, args, "--skip-projections"); matchParam { + skipProjections = append(skipProjections, skipProjectionsFromArgs) + fullCommand = fmt.Sprintf("%s --skip-projections=%s", fullCommand, skipProjectionsFromArgs) + } } commandId, _ := status.Current.Start(fullCommand) go func() { b := backup.NewBackuper(cfg) - err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx) + err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, skipProjections, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx) api.handleWatchResponse(commandId, err) }() @@ -772,6 +773,7 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { backupsJSON := make([]backupJSON, 0) cfg, err := api.ReloadConfig(w, "list") if err != nil { + api.writeError(w, http.StatusInternalServerError, "list", err) return } vars := mux.Vars(r) @@ -782,10 +784,6 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { } commandId, ctx := status.Current.Start(fullCommand) defer status.Current.Stop(commandId, err) - if err != nil { - api.writeError(w, http.StatusInternalServerError, "list", err) - return - } b := backup.NewBackuper(cfg) if where == "local" || !wherePresent { var localBackups []backup.LocalBackup @@ -875,6 +873,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) createConfigs := false configsOnly := false checkPartsColumns := true + skipProjections := make([]string, 0) resume := false fullCommand := "create" query := r.URL.Query() @@ -917,6 +916,11 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) fullCommand += " --skip-check-parts-columns" } + if skipProjectionsFromQuery, exist := api.getQueryParameter(query, "skip-projections"); exist { + skipProjections = append(skipProjections, skipProjectionsFromQuery) + fullCommand += " --skip-projections=" + strings.Join(skipProjections, ",") + } + if _, exist := query["resume"]; exist { resume = true fullCommand += " --resume" @@ -938,7 +942,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) go func() { err, _ := 
 		err, _ := api.metrics.ExecuteWithMetrics("create", 0, func() error {
 			b := backup.NewBackuper(cfg)
-			return b.CreateBackup(backupName, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, checkPartsColumns, resume, api.clickhouseBackupVersion, commandId)
+			return b.CreateBackup(backupName, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, checkPartsColumns, skipProjections, resume, api.clickhouseBackupVersion, commandId)
 		})
 		if err != nil {
 			log.Error().Msgf("API /backup/create error: %v", err)
@@ -981,6 +985,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	tablePattern := ""
 	partitionsToBackup := make([]string, 0)
+	skipProjections := make([]string, 0)
 	schemaOnly := false
 	rbacOnly := false
 	configsOnly := false
@@ -1032,6 +1037,10 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) {
 		skipCheckPartsColumns = true
 		fullCommand = fmt.Sprintf("%s --skip-check-parts-columns", fullCommand)
 	}
+	if skipProjectionsFromQuery, exist := api.getQueryParameter(query, "skip_projections"); exist {
+		skipProjections = append(skipProjections, skipProjectionsFromQuery)
+		fullCommand = fmt.Sprintf("%s --skip-projections=%s", fullCommand, skipProjectionsFromQuery)
+	}
 
 	if status.Current.CheckCommandInProgress(fullCommand) {
 		log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked)
@@ -1042,7 +1051,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) {
 	commandId, _ := status.Current.Start(fullCommand)
 	go func() {
 		b := backup.NewBackuper(cfg)
-		err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx)
+		err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, skipProjections, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx)
 		api.handleWatchResponse(commandId, err)
 	}()
 	api.sendJSONEachRow(w, http.StatusCreated, struct {
@@ -1128,6 +1137,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request)
 	name := utils.CleanBackupNameRE.ReplaceAllString(vars["name"], "")
 	tablePattern := ""
 	partitionsToBackup := make([]string, 0)
+	skipProjections := make([]string, 0)
 	schemaOnly := false
 	rbacOnly := false
 	configsOnly := false
@@ -1168,6 +1178,10 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request)
 		configsOnly = true
 		fullCommand += " --configs-only"
 	}
+	if skipProjectionsFromQuery, exist := query["skip-projections"]; exist {
+		skipProjections = skipProjectionsFromQuery
+		fullCommand += " --skip-projections=" + strings.Join(skipProjectionsFromQuery, ",")
+	}
 	if _, exist := query["resumable"]; exist {
 		resume = true
 		fullCommand += " --resumable"
@@ -1190,7 +1204,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request)
 	commandId, _ := status.Current.Start(fullCommand)
 	err, _ := api.metrics.ExecuteWithMetrics("upload", 0, func() error {
 		b := backup.NewBackuper(cfg)
-		return b.Upload(name, deleteSource, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, resume, api.cliApp.Version, commandId)
+		return b.Upload(name, deleteSource, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, skipProjections, schemaOnly, rbacOnly, configsOnly, resume, api.cliApp.Version, commandId)
 	})
 	if err != nil {
 		log.Error().Msgf("Upload error: %v", err)
@@ -1250,6 +1264,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request)
 	rbacOnly := false
 	restoreConfigs := false
 	configsOnly := false
+	skipProjections := make([]string, 0)
 	resume := false
 	fullCommand := "restore"
 	operationId, _ := uuid.NewUUID()
@@ -1345,6 +1360,10 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request)
 		configsOnly = true
 		fullCommand += " --configs-only"
 	}
+	if skipProjectionsFromQuery, exist := api.getQueryParameter(query, "skip-projections"); exist {
+		skipProjections = append(skipProjections, skipProjectionsFromQuery)
+		fullCommand += " --skip-projections=" + strings.Join(skipProjections, ",")
+	}
 	if _, exist := query["resumable"]; exist {
 		resume = true
 		fullCommand += " --resumable"
@@ -1368,7 +1387,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request)
 	go func() {
 		err, _ := api.metrics.ExecuteWithMetrics("restore", 0, func() error {
 			b := backup.NewBackuper(api.config)
-			return b.Restore(name, tablePattern, databaseMappingToRestore, tableMappingToRestore, partitionsToBackup, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume, api.cliApp.Version, commandId)
+			return b.Restore(name, tablePattern, databaseMappingToRestore, tableMappingToRestore, partitionsToBackup, skipProjections, schemaOnly, dataOnly, dropExists, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume, api.cliApp.Version, commandId)
 		})
 		go func() {
 			if metricsErr := api.UpdateBackupMetrics(context.Background(), true); metricsErr != nil {
@@ -1627,9 +1646,10 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e
 		}
 	}
 	if lastBackupUpload != nil {
-		api.metrics.LastFinish["upload"].Set(float64(lastBackupCreateRemote.Unix()))
 		if lastBackupCreateRemote == nil || lastBackupUpload.Unix() > lastBackupCreateRemote.Unix() {
 			api.metrics.LastFinish["create_remote"].Set(float64(lastBackupUpload.Unix()))
+		} else {
+			api.metrics.LastFinish["upload"].Set(float64(lastBackupCreateRemote.Unix()))
 		}
 	}
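Besides the CLI flag, the handler changes above expose the option through the REST API as a query parameter; the create/upload/restore handlers read `skip-projections`, while the watch handler reads the underscore form `skip_projections`. A hedged usage sketch follows (the listen address `127.0.0.1:7171` and the `POST /backup/create` endpoint are assumptions based on the project's documented API defaults, not on this diff; only the parameter name and value format come from the handlers above):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Same table pattern the integration test passes to --skip-projections.
	q := url.Values{}
	q.Set("skip-projections", "default.*")

	// Assumed endpoint and port; the query parameter is what the new
	// httpCreateHandler code turns into "--skip-projections=default.*".
	resp, err := http.Post("http://127.0.0.1:7171/backup/create?"+q.Encode(), "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```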
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index d0ee86b9..c69e27ba 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -1877,17 +1877,75 @@ func TestTablePatterns(t *testing.T) {
 }
 
 func TestProjections(t *testing.T) {
-	var err error
 	if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") == -1 {
 		t.Skipf("Test skipped, PROJECTION available only 21.8+, current version %s", os.Getenv("CLICKHOUSE_VERSION"))
 	}
+	var err error
+	var counts uint64
+
 	env, r := NewTestEnvironment(t)
 	env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute)
-	r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml"))
-	err = env.ch.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt")
+
+	// create --skip-projection
+	env.queryWithNoError(r, "CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt")
+	env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create", "--skip-projections", "default.*", "test_skip_projections")
+	err = env.DockerExec("clickhouse-backup", "bash", "-ec", "ls -l /var/lib/clickhouse/backup/test_skip_projections/shadow/default/table_with_projection/default/*/*.proj")
+	r.Error(err)
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "upload", "--delete-source", "test_skip_projections")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "download", "test_skip_projections")
+	err = env.DockerExec("clickhouse-backup", "bash", "-ec", "ls -l /var/lib/clickhouse/backup/test_skip_projections/shadow/default/table_with_projection/default/*/*.proj")
+	r.Error(err)
+	env.queryWithNoError(r, "DROP TABLE default.table_with_projection NO DELAY")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore", "test_skip_projections")
+	counts = 0
+	r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection"))
+	r.Equal(uint64(5), counts)
+	env.queryWithNoError(r, "DROP TABLE default.table_with_projection NO DELAY")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_skip_projections")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_skip_projections")
+
+	// upload --skip-projection
+	env.queryWithNoError(r, "CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt")
+	env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create", "test_skip_projections")
+	err = env.DockerExec("clickhouse-backup", "bash", "-ec", "ls -l /var/lib/clickhouse/backup/test_skip_projections/shadow/default/table_with_projection/default/*/*.proj")
+	r.NoError(err)
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "upload", "--skip-projections", "default.*", "--delete-source", "test_skip_projections")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "download", "test_skip_projections")
+	err = env.DockerExec("clickhouse-backup", "bash", "-ec", "ls -l /var/lib/clickhouse/backup/test_skip_projections/shadow/default/table_with_projection/default/*/*.proj")
+	r.Error(err)
+	env.queryWithNoError(r, "DROP TABLE default.table_with_projection NO DELAY")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore", "test_skip_projections")
+	counts = 0
+	r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection"))
+	r.Equal(uint64(5), counts)
+	env.queryWithNoError(r, "DROP TABLE default.table_with_projection NO DELAY")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_skip_projections")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_skip_projections")
+
+	// restore --skip-projection
+	env.queryWithNoError(r, "CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt")
+	env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create", "test_skip_projections")
+	err = env.DockerExec("clickhouse-backup", "bash", "-ec", "ls -l /var/lib/clickhouse/backup/test_skip_projections/shadow/default/table_with_projection/default/*/*.proj")
 	r.NoError(err)
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "upload", "--delete-source", "test_skip_projections")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "download", "test_skip_projections")
+	err = env.DockerExec("clickhouse-backup", "bash", "-ec", "ls -l /var/lib/clickhouse/backup/test_skip_projections/shadow/default/table_with_projection/default/*/*.proj")
+	r.NoError(err)
+	env.queryWithNoError(r, "DROP TABLE default.table_with_projection NO DELAY")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore", "--skip-projections", "default.*", "test_skip_projections")
+	counts = 0
+	r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection"))
+	r.Equal(uint64(5), counts)
+	err = env.ch.Query("DROP TABLE default.table_with_projection NO DELAY")
+	r.NoError(err)
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_skip_projections")
+	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_skip_projections")
+	env.queryWithNoError(r, "CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt")
 	env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)")
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create_remote", "test_backup_projection_full")
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")
@@ -1899,7 +1957,7 @@ func TestProjections(t *testing.T) {
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_increment")
-	var counts uint64
+	counts = 0
 	r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection"))
 	r.Equal(uint64(10), counts)
 	if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") >= 0 {
@@ -1908,13 +1966,13 @@ func TestProjections(t *testing.T) {
 		r.Equal(uint64(10), counts)
 	}
-	err = env.ch.Query("DROP TABLE default.table_with_projection NO DELAY")
-	r.NoError(err)
+	env.queryWithNoError(r, "DROP TABLE default.table_with_projection NO DELAY")
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment")
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full")
 	env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")
+	env.Cleanup(t, r)
 }