Commit a06afa8
- use `os.Link` instead of `os.Rename` for ClickHouse 21.4+, to properly create backup object disks

- ignore `frozen_metadata` during the `create`, `upload`, `download` and `restore` commands, fix #826
- `allow_parallel: true` didn't work after executing the `list` command, fix #827
Slach committed Feb 9, 2024
1 parent 78ae58d commit a06afa8
Showing 10 changed files with 99 additions and 39 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/build.yaml
@@ -260,6 +260,8 @@ jobs:
# don't change it to avoid broken CI/CD!!!
# RUN_TESTS: "TestFIPS"
# LOG_LEVEL: "debug"
# SFTP_DEBUG: "true"
# AZBLOB_DEBUG: "true"
# FTP_DEBUG: "true"
# S3_DEBUG: "true"
CGO_ENABLED: 0
4 changes: 3 additions & 1 deletion ChangeLog.md
@@ -4,7 +4,9 @@ IMPROVEMENTS

BUG FIXES
- force set `RefCount` to 0 during `restore` for parts on S3 / GCS-over-S3 / Azure disks, so that DROP TABLE / DROP DATABASE work properly
- ignore frozen-metadata during, create, upload, download, restore command, fix [826](https://github.com/Altinity/clickhouse-backup/issues/826)
- use `os.Link` instead of `os.Rename` for ClickHouse 21.4+, to properly create backup object disks
- ignore `frozen_metadata` during the `create`, `upload`, `download` and `restore` commands, fix [826](https://github.com/Altinity/clickhouse-backup/issues/826)
- `allow_parallel: true` didn't work after executing the `list` command, fix [827](https://github.com/Altinity/clickhouse-backup/issues/827)

# v2.4.28
IMPROVEMENT
5 changes: 3 additions & 2 deletions pkg/backup/create.go
@@ -605,7 +605,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku
return nil, nil, err
}
// If partitionsIdsMap is not empty, only parts in this partition will back up.
parts, size, err := filesystemhelper.MoveShadow(shadowPath, backupShadowPath, partitionsIdsMap)
parts, size, err := filesystemhelper.MoveShadow(shadowPath, backupShadowPath, partitionsIdsMap, version)
if err != nil {
return nil, nil, err
}
@@ -665,7 +665,8 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName, backup
if fInfo.IsDir() {
return nil
}
if fInfo.Name() == "frozen_metadata.txt" {
// fix https://github.com/Altinity/clickhouse-backup/issues/826
if strings.Contains(fInfo.Name(), "frozen_metadata") {
return nil
}
objPartFileMeta, err := object_disk.ReadMetadataFromFile(fPath)
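For context on the `frozen_metadata` filter above: ClickHouse leaves `frozen_metadata.txt` service files in the shadow directory when freezing tables on object disks, and they are not part data, so the walks in `create`, `upload`, `download` and `restore` now all skip them (issue #826). A minimal, self-contained sketch of that filter pattern; the paths and helper name are illustrative, not the project's real API:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// collectPartFiles walks a shadow directory and returns data files only,
// skipping directories and any ClickHouse service file whose name
// contains "frozen_metadata" (the issue #826 fix).
func collectPartFiles(root string) ([]string, error) {
	var files []string
	err := filepath.Walk(root, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if info.IsDir() {
			return nil
		}
		if strings.Contains(info.Name(), "frozen_metadata") {
			return nil // service file, not part data
		}
		files = append(files, path)
		return nil
	})
	return files, err
}

func main() {
	files, err := collectPartFiles("/var/lib/clickhouse/shadow")
	fmt.Println(files, err)
}
```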
11 changes: 7 additions & 4 deletions pkg/backup/restore.go
@@ -890,19 +890,22 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
if fInfo.IsDir() {
return nil
}
if fInfo.Name() == "frozen_metadata.txt" {
// fix https://github.com/Altinity/clickhouse-backup/issues/826
if strings.Contains(fInfo.Name(), "frozen_metadata") {
return nil
}
objMeta, err := object_disk.ReadMetadataFromFile(fPath)
if err != nil {
return err
}
if objMeta.StorageObjectCount < 1 && objMeta.Version != object_disk.VersionRelativePath {
if objMeta.StorageObjectCount < 1 && objMeta.Version < object_disk.VersionRelativePath {
return fmt.Errorf("%s: invalid object_disk.Metadata: %#v", fPath, objMeta)
}
//to allow delete Object Disk Data after DROP TABLE/DATABASE ...SYNC
if objMeta.RefCount > 0 {
//to allow deleting Object Disk Data during DROP TABLE/DATABASE ...SYNC
if objMeta.RefCount > 0 || objMeta.ReadOnly {
objMeta.RefCount = 0
objMeta.ReadOnly = false
log.Debugf("%s %#v set RefCount=0 and ReadOnly=0", fPath, objMeta.StorageObjects)
if writeMetaErr := object_disk.WriteMetadataToFile(objMeta, fPath); writeMetaErr != nil {
return fmt.Errorf("%s: object_disk.WriteMetadataToFile return error: %v", fPath, writeMetaErr)
}
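Two behavior changes land in this walk: metadata versions newer than `object_disk.VersionRelativePath` are now accepted (the guard became `<` rather than `!=`), and the restore now clears the `ReadOnly` flag alongside `RefCount`, since either one would keep ClickHouse from deleting the backing S3/GCS/Azure objects on `DROP TABLE ... SYNC`. A rough sketch of the reset rule, using an illustrative struct rather than the real `object_disk.Metadata`:

```go
package main

import "fmt"

// objectDiskMeta is a stand-in for object_disk.Metadata; the field set
// is an assumption made for this sketch.
type objectDiskMeta struct {
	Version            int
	StorageObjectCount int
	RefCount           uint32
	ReadOnly           bool
}

// normalizeForRestore clears the flags that would otherwise block object
// deletion on DROP ... SYNC, and reports whether the metadata file needs
// to be rewritten.
func normalizeForRestore(m *objectDiskMeta) bool {
	if m.RefCount == 0 && !m.ReadOnly {
		return false // nothing to reset, skip the write
	}
	m.RefCount = 0
	m.ReadOnly = false
	return true
}

func main() {
	m := objectDiskMeta{Version: 4, StorageObjectCount: 1, RefCount: 2, ReadOnly: true}
	fmt.Println(normalizeForRestore(&m), m) // true {4 1 0 false}
}
```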
17 changes: 15 additions & 2 deletions pkg/filesystemhelper/filesystemhelper.go
@@ -160,6 +160,10 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM
if err != nil {
return err
}
// fix https://github.com/Altinity/clickhouse-backup/issues/826
if strings.Contains(info.Name(), "frozen_metadata") {
return nil
}
filename := strings.Trim(strings.TrimPrefix(filePath, srcPartPath), "/")
dstFilePath := filepath.Join(dstPartPath, filename)
if info.IsDir() {
@@ -207,11 +207,16 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMap
return ok
}

func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap) ([]metadata.Part, int64, error) {
func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, version int) ([]metadata.Part, int64, error) {
log := apexLog.WithField("logger", "MoveShadow")
size := int64(0)
parts := make([]metadata.Part, 0)
err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error {
// fix https://github.com/Altinity/clickhouse-backup/issues/826
if strings.Contains(info.Name(), "frozen_metadata") {
return nil
}

// possible relative path
// store / 1f9 / 1f9dc899-0de9-41f8-b95c-26c1f0d67d93 / 20181023_2_2_0 / checksums.txt
// store / 1f9 / 1f9dc899-0de9-41f8-b95c-26c1f0d67d93 / 20181023_2_2_0 / x.proj / checksums.txt
@@ -239,7 +248,11 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap
return nil
}
size += info.Size()
return os.Rename(filePath, dstFilePath)
if version < 21004000 {
return os.Rename(filePath, dstFilePath)
} else {
return os.Link(filePath, dstFilePath)
}
})
return parts, size, err
}
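`MoveShadow` now takes the server version so it can hard-link rather than rename on ClickHouse 21.4+, leaving the frozen shadow files in place for the object-disk upload step. A condensed sketch of just that decision; the paths are illustrative, and the version encoding (major*1000000 + minor*1000 + patch, so 21004000 is 21.4.0) is inferred from the threshold above:

```go
package main

import (
	"fmt"
	"os"
)

// moveOrLink mirrors MoveShadow's final step: before ClickHouse 21.4 the
// frozen file is moved into the backup; from 21.4 on it is hard-linked so
// the original shadow copy survives for later object-disk handling.
func moveOrLink(src, dst string, clickhouseVersion int) error {
	if clickhouseVersion < 21004000 {
		return os.Rename(src, dst)
	}
	return os.Link(src, dst)
}

func main() {
	// Illustrative paths only.
	err := moveOrLink(
		"/var/lib/clickhouse/shadow/1/data.bin",
		"/var/lib/clickhouse/backup/b1/shadow/data.bin",
		21008000, // i.e. ClickHouse 21.8
	)
	fmt.Println(err)
}
```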
15 changes: 9 additions & 6 deletions pkg/status/status.go
@@ -72,16 +72,19 @@ func (status *AsyncStatus) CheckCommandInProgress(command string) bool {
return false
}

// InProgress returns true if any command has .Status == InProgressStatus, see https://github.com/Altinity/clickhouse-backup/issues/827
func (status *AsyncStatus) InProgress() bool {
status.RLock()
defer status.RUnlock()
n := len(status.commands) - 1
if n < 0 {
status.log.Debugf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands))
return false
for n := range status.commands {
if status.commands[n].Status == InProgressStatus {
status.log.Debugf("api.status.inProgress -> status.commands[%d].Status == %s, inProgress=%v", n, status.commands[n].Status, status.commands[n].Status == InProgressStatus)
return true
}
}
status.log.Debugf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus)
return status.commands[n].Status == InProgressStatus

status.log.Debugf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands))
return false
}

func (status *AsyncStatus) GetContextWithCancel(commandId int) (context.Context, context.CancelFunc, error) {
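The previous `InProgress` looked only at the newest entry in `status.commands`, so a finished `list` appended after a still-running `create` made it report false, which is exactly how `allow_parallel: true` misbehaved in issue #827. The rewrite returns true as soon as any command is still in progress. A toy reproduction of the difference, with simplified types in place of the real `AsyncStatus`:

```go
package main

import "fmt"

type command struct {
	Name   string
	Status string
}

const inProgressStatus = "in progress"

// inProgressOld checks only the newest command, the pre-fix behavior.
func inProgressOld(cmds []command) bool {
	n := len(cmds) - 1
	if n < 0 {
		return false
	}
	return cmds[n].Status == inProgressStatus
}

// inProgressNew scans every command, the post-fix behavior.
func inProgressNew(cmds []command) bool {
	for _, c := range cmds {
		if c.Status == inProgressStatus {
			return true
		}
	}
	return false
}

func main() {
	// A `create` is still running when a later `list` has already finished.
	cmds := []command{
		{Name: "create", Status: inProgressStatus},
		{Name: "list", Status: "success"},
	}
	fmt.Println(inProgressOld(cmds), inProgressNew(cmds)) // false true
}
```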
24 changes: 20 additions & 4 deletions test/integration/dynamic_settings.sh
@@ -237,11 +237,27 @@ cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml
</clickhouse>
EOT

cat <<EOT > /etc/clickhouse-server/config.d/zero_copy_replication.xml
# zero-copy replication is buggy
#cat <<EOT > /etc/clickhouse-server/config.d/zero_copy_replication.xml
#<yandex>
# <merge_tree>
# <allow_remote_fs_zero_copy_replication>1</allow_remote_fs_zero_copy_replication>
# </merge_tree>
#</yandex>
#EOT

cat <<EOT > /etc/clickhouse-server/config.d/zookeeper_log.xml
<yandex>
<merge_tree>
<allow_remote_fs_zero_copy_replication>1</allow_remote_fs_zero_copy_replication>
</merge_tree>
<zookeeper_log>
<database>system</database>
<table>zookeeper_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
<flush_on_crash>true</flush_on_crash>
</zookeeper_log>
</yandex>
EOT
fi
53 changes: 36 additions & 17 deletions test/integration/integration_test.go
@@ -3,7 +3,9 @@
package main

import (
"bufio"
"context"
"encoding/json"
"fmt"
"github.com/Altinity/clickhouse-backup/v2/pkg/config"
"github.com/Altinity/clickhouse-backup/v2/pkg/logcli"
@@ -442,6 +444,7 @@ func TestS3NoDeletePermission(t *testing.T) {
r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup"))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote"))
checkObjectStorageIsEmpty(r, "S3")
}

// TestDoRestoreRBAC need clickhouse-server restart, no parallel
@@ -808,6 +811,29 @@ func testAPIBackupDelete(r *require.Assertions) {
out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
r.NoError(err)
r.Contains(out, "clickhouse_backup_last_delete_status 1")

out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XGET 'http://localhost:7171/backup/list'")
log.Info(out)
r.NoError(err)
scanner := bufio.NewScanner(strings.NewReader(out))
for scanner.Scan() {
type backupJSON struct {
Name string `json:"name"`
Created string `json:"created"`
Size uint64 `json:"size,omitempty"`
Location string `json:"location"`
RequiredBackup string `json:"required"`
Desc string `json:"desc"`
}
listItem := backupJSON{}
r.NoError(json.Unmarshal(scanner.Bytes(), &listItem))
out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/%s/%s'", listItem.Location, listItem.Name))
log.Info(out)
r.NoError(err)
}

r.NoError(scanner.Err())

}
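The deletion loop above relies on `/backup/list` emitting one JSON object per line (JSON Lines), which is why it scans with `bufio.Scanner` instead of unmarshalling the whole body at once. A standalone sketch of that parsing pattern, fed from hard-coded sample lines whose values are invented for illustration:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

type backupItem struct {
	Name     string `json:"name"`
	Location string `json:"location"`
}

func main() {
	// Two fabricated lines in the shape the test expects from /backup/list.
	out := "{\"name\":\"b1\",\"location\":\"local\"}\n" +
		"{\"name\":\"b1\",\"location\":\"remote\"}"
	scanner := bufio.NewScanner(strings.NewReader(out))
	for scanner.Scan() {
		var item backupItem
		if err := json.Unmarshal(scanner.Bytes(), &item); err != nil {
			panic(err)
		}
		// The test then issues: POST /backup/delete/{location}/{name}.
		fmt.Printf("would delete %s backup %q\n", item.Location, item.Name)
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}
```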

func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) {
@@ -1122,15 +1148,16 @@ func TestTablePatterns(t *testing.T) {
if createPattern {
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName))
} else {
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", testBackupName))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName))
}

r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", testBackupName))
dropDatabasesFromTestDataDataSet(t, r, ch, databaseList)

if restorePattern {
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName))
} else {
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", testBackupName))
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", testBackupName))
}

restored := uint64(0)
@@ -1157,6 +1184,7 @@ func TestTablePatterns(t *testing.T) {

}
}
checkObjectStorageIsEmpty(r, "S3")
}

func TestProjections(t *testing.T) {
@@ -1272,6 +1300,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) {
r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name())))
r.Equal(uint64(200), res)
fullCleanup(t, r, ch, backupNames, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml")
checkObjectStorageIsEmpty(r, "S3")
}

func TestSyncReplicaTimeout(t *testing.T) {
@@ -1651,6 +1680,7 @@ func TestIntegrationS3Glacier(t *testing.T) {

func TestIntegrationS3(t *testing.T) {
//t.Parallel()
checkObjectStorageIsEmpty(require.New(t), "S3")
runMainIntegrationScenario(t, "S3", "config-s3.yml")
}

@@ -2022,10 +2052,10 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st
fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true, backupConfig)
}
replaceStorageDiskNameForReBalance(r, ch, remoteStorageType, true)
checkObjectDiskBackupIsEmpty(r, remoteStorageType)
checkObjectStorageIsEmpty(r, remoteStorageType)
}

func checkObjectDiskBackupIsEmpty(r *require.Assertions, remoteStorageType string) {
func checkObjectStorageIsEmpty(r *require.Assertions, remoteStorageType string) {
if remoteStorageType == "AZBLOB" {
r.NoError(dockerExec("azure", "apk", "add", "jq"))
checkBlobCollection := func(containerName string, expected string) {
@@ -2351,17 +2381,6 @@ func generateIncrementTestData(t *testing.T, ch *TestClickHouse, r *require.Asse
}

func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestClickHouse, databaseList []string) {
ch.queryWithNoError(r, "SYSTEM RESTART REPLICAS")
type readOnlyTable struct {
Database string `ch:"database"`
Table string `ch:"table"`
IsReadOnly string `ch:"is_readonly"`
}
allReadonly := make([]readOnlyTable, 0)
r.NoError(ch.chbackend.StructSelect(&allReadonly, "SELECT database,table,is_readonly FROM system.replicas WHERE is_readonly"))
t.Logf("Current ReadOnly replicas %#v", allReadonly)

r.Equal(0, len(allReadonly))
log.Info("Drop all databases")
for _, db := range databaseList {
r.NoError(ch.dropDatabase(db + "_" + t.Name()))
5 changes: 3 additions & 2 deletions test/integration/run.sh
@@ -27,11 +27,12 @@ export GLACIER_TESTS=${GLACIER_TESTS:-0}

export AZURE_TESTS=${AZURE_TESTS:-1}
export RUN_ADVANCED_TESTS=${RUN_ADVANCED_TESTS:-1}
export GODEBUG=${GODEBUG:-}
export S3_DEBUG=${S3_DEBUG:-false}
export GCS_DEBUG=${GCS_DEBUG:-false}
export FTP_DEBUG=${FTP_DEBUG:-false}
export SFTP_DEBUG=${SFTP_DEBUG:-false}
export GODEBUG=${GODEBUG:-}
export AZBLOB_DEBUG=${AZBLOB_DEBUG:-false}
export CLICKHOUSE_DEBUG=${CLICKHOUSE_DEBUG:-false}

if [[ "${CLICKHOUSE_VERSION}" == 2* || "${CLICKHOUSE_VERSION}" == "head" ]]; then
@@ -47,4 +48,4 @@ docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} up -d
docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} exec minio mc alias list

go test -parallel ${RUN_PARALLEL:-$(nproc)} -timeout ${TESTS_TIMEOUT:-30m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go
go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out"
go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out"