From eea4f62e3396b099cc65eca54076cc5506219faa Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 14 Jun 2023 12:35:53 +0400 Subject: [PATCH] zerolog completely useless for multiple go-routines ;( https://github.com/rs/zerolog/issues/555, try to remove all local loggers with context upload.go --- pkg/backup/backuper.go | 2 - pkg/backup/create.go | 88 ++++++-------- pkg/backup/delete.go | 17 ++- pkg/backup/download.go | 147 +++++++++++------------ pkg/backup/list.go | 29 ++--- pkg/backup/restore.go | 88 ++++++-------- pkg/backup/table_pattern.go | 3 +- pkg/backup/upload.go | 43 +++---- pkg/backup/watch.go | 14 ++- pkg/clickhouse/clickhouse.go | 62 +++++----- pkg/filesystemhelper/filesystemhelper.go | 23 ++-- pkg/metadata/load.go | 3 +- pkg/resumable/state.go | 19 ++- pkg/server/metrics/metrics.go | 17 ++- pkg/server/server.go | 138 ++++++++++----------- pkg/server/utils.go | 13 +- pkg/status/status.go | 26 ++-- pkg/storage/ftp.go | 19 ++- pkg/storage/general.go | 81 +++++-------- pkg/storage/s3.go | 19 ++- test/integration/integration_test.go | 1 - 21 files changed, 383 insertions(+), 469 deletions(-) diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index ee67b2e1..b3e9d26b 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -7,7 +7,6 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/storage" - "github.com/rs/zerolog/log" "path" ) @@ -26,7 +25,6 @@ type Backuper struct { func NewBackuper(cfg *config.Config) *Backuper { ch := &clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Logger: log.With().Str("logger", "clickhouse").Logger(), } return &Backuper{ cfg: cfg, diff --git a/pkg/backup/create.go b/pkg/backup/create.go index c492679e..9c5b2a94 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "github.com/Altinity/clickhouse-backup/pkg/status" - "github.com/rs/zerolog" "os" "path" "path/filepath" @@ -50,7 +49,6 @@ func addTable(tables []clickhouse.Table, table clickhouse.Table) []clickhouse.Ta } func filterTablesByPattern(tables []clickhouse.Table, tablePattern string) []clickhouse.Table { - logger := log.With().Str("logger", "filterTablesByPattern").Logger() if tablePattern == "" { return tables } @@ -62,7 +60,7 @@ func filterTablesByPattern(tables []clickhouse.Table, tablePattern string) []cli if matched, _ := filepath.Match(strings.Trim(pattern, " \t\n\r"), tableName); matched { result = addTable(result, t) } else { - logger.Debug().Msgf("%s not matched with %s", tableName, pattern) + log.Debug().Msgf("%s not matched with %s", tableName, pattern) } } } @@ -90,10 +88,6 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st backupName = NewBackupName() } backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") - logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "create", }).Logger() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -140,9 +134,9 @@ partitionsToBackupMap, partitions := filesystemhelper.CreatePartitionsToBackupMap(ctx, b.ch, tables, nil, partitions) // create if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitions, partitionsToBackupMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks,
diskMap, logger, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitions, partitionsToBackupMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks, diskMap, startBackup, version) } else { - err = b.createBackupLocal(ctx, backupName, partitionsToBackupMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, allDatabases, allFunctions, logger, startBackup) + err = b.createBackupLocal(ctx, backupName, partitionsToBackupMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, allDatabases, allFunctions, startBackup) } if err != nil { return err @@ -155,7 +149,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st return nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsToBackupMap common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, logger zerolog.Logger, startBackup time.Time) error { +func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsToBackupMap common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, startBackup time.Time) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -172,7 +166,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } if _, err := os.Stat(backupPath); os.IsNotExist(err) { if err = filesystemhelper.Mkdir(backupPath, b.ch, disks); err != nil { - logger.Error().Msgf("can't create directory %s: %v", backupPath, err) + log.Error().Msgf("can't create directory %s: %v", backupPath, err) return err } } @@ -184,24 +178,23 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par case <-ctx.Done(): return ctx.Err() default: - tableLog := logger.With().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Logger() if table.Skip { continue } var realSize map[string]int64 var disksToPartsMap map[string][]metadata.Part if doBackupData { - tableLog.Debug().Msg("create data") + log.Debug().Msg("create data") shadowBackupUUID := strings.ReplaceAll(uuid.New().String(), "-", "") disksToPartsMap, realSize, err = b.AddTableToBackup(ctx, backupName, shadowBackupUUID, disks, &table, partitionsToBackupMap) if err != nil { - tableLog.Error().Msg(err.Error()) + log.Error().Msg(err.Error()) if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - tableLog.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } // fix corner cases after https://github.com/Altinity/clickhouse-backup/issues/379 if cleanShadowErr := b.Clean(ctx); cleanShadowErr != nil { - tableLog.Error().Msg(cleanShadowErr.Error()) + log.Error().Msg(cleanShadowErr.Error()) } return err } @@ -211,19 +204,19 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } } // https://github.com/Altinity/clickhouse-backup/issues/529 - tableLog.Debug().Msg("get in progress mutations list") + log.Debug().Msg("get in progress mutations list") 
inProgressMutations := make([]metadata.MutationMetadata, 0) if b.cfg.ClickHouse.BackupMutations { inProgressMutations, err = b.ch.GetInProgressMutations(ctx, table.Database, table.Name) if err != nil { - tableLog.Error().Msg(err.Error()) + log.Error().Msg(err.Error()) if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - tableLog.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } } - tableLog.Debug().Msg("create metadata") + log.Debug().Msg("create metadata") metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ Table: table.Name, @@ -237,7 +230,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par }, disks) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - tableLog.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } @@ -246,35 +239,35 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par Database: table.Database, Table: table.Name, }) - tableLog.Info().Msgf("done") + log.Info().Str("database", table.Database).Str("table", table.Name).Msgf("done") } } backupRBACSize, backupConfigSize := uint64(0), uint64(0) if rbacOnly { if backupRBACSize, err = b.createRBACBackup(ctx, backupPath, disks); err != nil { - logger.Error().Msgf("error during do RBAC backup: %v", err) + log.Error().Msgf("error during do RBAC backup: %v", err) } else { - logger.Info().Str("size", utils.FormatBytes(backupRBACSize)).Msg("done createRBACBackup") + log.Info().Str("size", utils.FormatBytes(backupRBACSize)).Msg("done createRBACBackup") } } if configsOnly { if backupConfigSize, err = b.createConfigBackup(ctx, backupPath); err != nil { - logger.Error().Msgf("error during do CONFIG backup: %v", err) + log.Error().Msgf("error during do CONFIG backup: %v", err) } else { - logger.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createConfigBackup") + log.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createConfigBackup") } } backupMetaFile := path.Join(defaultPath, "backup", backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, logger); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions); err != nil { return err } - logger.Info().Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done") + log.Info().Str("operation", "create").Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done") return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitions []string, partitionsToBackupMap common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap map[string]string, logger zerolog.Logger, startBackup time.Time, backupVersion string) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitions []string, partitionsToBackupMap common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, 
tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap map[string]string, startBackup time.Time, backupVersion string) error { if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists { return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) } @@ -354,7 +347,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa }{Size: 0}) } - logger.Debug().Msg("calculate parts list from embedded backup disk") + log.Debug().Msg("calculate parts list from embedded backup disk") for _, table := range tables { select { case <-ctx.Done(): @@ -366,7 +359,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa disksToPartsMap, err := b.getPartsFromBackupDisk(backupPath, table, partitionsToBackupMap) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - logger.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } @@ -381,7 +374,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa }, disks) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - logger.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } @@ -389,11 +382,11 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa } } backupMetaFile := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions, logger); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions); err != nil { return err } - logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "operation": "create_embedded", "duration": utils.HumanizeDuration(time.Since(startBackup)), }).Msg("done") @@ -438,14 +431,13 @@ func (b *Backuper) getPartsFromBackupDisk(backupPath string, table clickhouse.Ta } func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (uint64, error) { - logger := log.With().Str("logger", "createConfigBackup").Logger() select { case <-ctx.Done(): return 0, ctx.Err() default: backupConfigSize := uint64(0) configBackupPath := path.Join(backupPath, "configs") - logger.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) + log.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) copyErr := recursiveCopy.Copy(b.cfg.ClickHouse.ConfigDir, configBackupPath, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { backupConfigSize += uint64(srcinfo.Size()) @@ -457,7 +449,6 @@ func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (u } func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disks []clickhouse.Disk) (uint64, error) { - logger := log.With().Str("logger", "createRBACBackup").Logger() select { case <-ctx.Done(): return 0, ctx.Err() @@ -468,7 +459,7 @@ func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disk if err != nil { return 0, err 
} - logger.Debug().Msgf("copy %s -> %s", accessPath, rbacBackup) + log.Debug().Msgf("copy %s -> %s", accessPath, rbacBackup) copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { rbacDataSize += uint64(srcinfo.Size()) @@ -480,17 +471,12 @@ func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disk } func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsToBackupMap common.EmptyMap) (map[string][]metadata.Part, map[string]int64, error) { - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "create", - "table": fmt.Sprintf("%s.%s", table.Database, table.Name), - }).Logger() if backupName == "" { return nil, nil, fmt.Errorf("backupName is not defined") } if !strings.HasSuffix(table.Engine, "MergeTree") && table.Engine != "MaterializedMySQL" && table.Engine != "MaterializedPostgreSQL" { - logger.Warn().Str("engine", table.Engine).Msg("supports only schema backup") + log.Warn().Str("engine", table.Engine).Msg("supports only schema backup") return nil, nil, nil } if b.cfg.ClickHouse.CheckPartsColumns { @@ -502,7 +488,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku if err := b.ch.FreezeTable(ctx, table, shadowBackupUUID); err != nil { return nil, nil, err } - logger.Debug().Msg("frozen") + log.Debug().Msg("frozen") version, err := b.ch.GetVersion(ctx) if err != nil { return nil, nil, err @@ -531,7 +517,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku } realSize[disk.Name] = size disksToPartsMap[disk.Name] = parts - logger.Debug().Str("disk", disk.Name).Msg("shadow moved") + log.Debug().Str("disk", disk.Name).Msg("shadow moved") // Clean all the files under the shadowPath, cause UNFREEZE unavailable if version < 21004000 { if err := os.RemoveAll(shadowPath); err != nil { @@ -546,11 +532,15 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku return disksToPartsMap, realSize, err } } - logger.Debug().Msg("done") + log.Debug().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "create", + "table": fmt.Sprintf("%s.%s", table.Database, table.Name), + }).Msg("done") return disksToPartsMap, realSize, nil } -func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, logger zerolog.Logger) error { +func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function) error { select { case <-ctx.Done(): return ctx.Err() @@ -586,7 +576,7 @@ func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, bac return err } if err := filesystemhelper.Chown(backupMetaFile, b.ch, disks, false); err != nil { - logger.Warn().Msgf("can't chown %s: %v", backupMetaFile, err) + log.Warn().Msgf("can't chown %s: %v", backupMetaFile, err) } return nil } diff --git a/pkg/backup/delete.go 
b/pkg/backup/delete.go index a457a04d..1355ff80 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -18,7 +18,6 @@ import ( // Clean - removed all data in shadow folder func (b *Backuper) Clean(ctx context.Context) error { - logger := log.With().Str("logger", "Clean").Logger() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -36,7 +35,7 @@ func (b *Backuper) Clean(ctx context.Context) error { if err := b.cleanDir(shadowDir); err != nil { return fmt.Errorf("can't clean '%s': %v", shadowDir, err) } - logger.Info().Msg(shadowDir) + log.Info().Msg(shadowDir) } return nil } @@ -95,7 +94,6 @@ func (b *Backuper) RemoveOldBackupsLocal(ctx context.Context, keepLastBackup boo } func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - logger := log.With().Str("logger", "RemoveBackupLocal").Logger() var err error start := time.Now() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") @@ -120,13 +118,13 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis if disk.IsBackup { backupPath = path.Join(disk.Path, backupName) } - logger.Debug().Msgf("remove '%s'", backupPath) + log.Debug().Msgf("remove '%s'", backupPath) err = os.RemoveAll(backupPath) if err != nil { return err } } - logger.Info().Str("operation", "delete"). + log.Info().Str("operation", "delete"). Str("location", "local"). Str("backup", backupName). Str("duration", utils.HumanizeDuration(time.Since(start))). @@ -138,12 +136,11 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis } func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) error { - logger := log.With().Str("logger", "RemoveBackupRemote").Logger() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") start := time.Now() if b.cfg.General.RemoteStorage == "none" { err := errors.New("aborted: RemoteStorage set to \"none\"") - logger.Error().Msg(err.Error()) + log.Error().Msg(err.Error()) return err } if b.cfg.General.RemoteStorage == "custom" { @@ -164,7 +161,7 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er } defer func() { if err := bd.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -175,10 +172,10 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er for _, backup := range backupList { if backup.BackupName == backupName { if err := bd.RemoveBackup(ctx, backup); err != nil { - logger.Warn().Msgf("bd.RemoveBackup return error: %v", err) + log.Warn().Msgf("bd.RemoveBackup return error: %v", err) return err } - logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "backup": backupName, "location": "remote", "operation": "delete", diff --git a/pkg/backup/download.go b/pkg/backup/download.go index a133a6b2..f5dca9f1 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -12,7 +12,6 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/eapache/go-resiliency/retrier" - "github.com/rs/zerolog" "io" "os" "path" @@ -37,10 +36,6 @@ var ( ) func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error { - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "download_legacy", - }).Logger() bd, err 
:= storage.NewBackupDestination(ctx, b.cfg, b.ch, true, "") if err != nil { return err @@ -50,7 +45,7 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error } defer func() { if err := bd.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -60,7 +55,10 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error if err != nil { return err } - logger.Info().Msg("done") + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "download_legacy", + }).Msg("done") return nil } @@ -77,10 +75,6 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer b.ch.Close() - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "download", - }).Logger() if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for download, change you config or use REMOTE_STORAGE environment variable") } @@ -104,7 +98,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if strings.Contains(localBackups[i].Tags, "embedded") || b.cfg.General.RemoteStorage == "custom" { return ErrBackupIsAlreadyExists } - logger.Warn().Msgf("%s already exists will try to resume download", backupName) + log.Warn().Msgf("%s already exists will try to resume download", backupName) } } } @@ -117,7 +111,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer func() { if err := b.dst.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -145,7 +139,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if schemaOnly { return fmt.Errorf("'%s' is old format backup and doesn't supports download of schema only", backupName) } - logger.Warn().Msgf("'%s' is old-format backup", backupName) + log.Warn().Msgf("'%s' is old-format backup", backupName) return b.legacyDownload(ctx, backupName) } if len(remoteBackup.Tables) == 0 && !b.cfg.General.AllowEmptyBackups { @@ -180,20 +174,19 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ }) } - logger.Debug().Msgf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) + log.Debug().Str("backup", backupName).Msgf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) downloadSemaphore := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) metadataGroup, metadataCtx := errgroup.WithContext(ctx) for i, t := range tablesForDownload { if err := downloadSemaphore.Acquire(metadataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Download metadata: %v", err) + log.Error().Msgf("can't acquire semaphore during Download metadata: %v", err) break } idx := i tableTitle := t metadataGroup.Go(func() error { defer downloadSemaphore.Release(1) - tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)).Logger() - downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, 
backupName, disks, tableLogger, tableTitle, schemaOnly, partitions) + downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableTitle, schemaOnly, partitions) if err != nil { return err } @@ -210,11 +203,11 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ for disk := range t.Parts { if _, diskExists := b.DiskToPathMap[disk]; !diskExists && disk != b.cfg.ClickHouse.EmbeddedBackupDisk { b.DiskToPathMap[disk] = b.DiskToPathMap["default"] - logger.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %s", t.Database, t.Table, disk, b.DiskToPathMap["default"]) + log.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %s", t.Database, t.Table, disk, b.DiskToPathMap["default"]) } } } - logger.Debug().Msgf("prepare table SHADOW concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) + log.Debug().Str("backupName", backupName).Msgf("prepare table SHADOW concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) dataGroup, dataCtx := errgroup.WithContext(ctx) for i, tableMetadata := range tableMetadataAfterDownload { @@ -222,7 +215,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ continue } if err := downloadSemaphore.Acquire(dataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Download table data: %v", err) + log.Error().Msgf("can't acquire semaphore during Download table data: %v", err) break } dataSize += tableMetadata.TotalBytes @@ -233,7 +226,8 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, tableMetadataAfterDownload[idx]); err != nil { return err } - logger.Info(). + log.Info(). + Str("backup_name", backupName). Str("operation", "download_data"). Str("table", fmt.Sprintf("%s.%s", tableMetadataAfterDownload[idx].Database, tableMetadataAfterDownload[idx].Table)). Str("duration", utils.HumanizeDuration(time.Since(start))). @@ -297,24 +291,26 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ b.resumableState.Close() } - logger.Info(). + log.Info(). + Str("operation", "download"). + Str("backup", backupName). Str("duration", utils.HumanizeDuration(time.Since(startDownload))). Str("size", utils.FormatBytes(dataSize+metadataSize+rbacSize+configSize)). 
Msg("done") return nil } -func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, logger zerolog.Logger, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { +func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { metadataLocalFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.json", common.TablePathEncode(tableTitle.Table))) tm := &metadata.TableMetadata{} if _, err := tm.Load(metadataLocalFile); err == nil { return tm, nil } - tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, logger.With().Fields(map[string]interface{}{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}).Logger(), tableTitle, false, nil) + tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, tableTitle, false, nil) return tm, err } -func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, logger zerolog.Logger, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { +func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { start := time.Now() size := uint64(0) metadataFiles := map[string]string{} @@ -398,8 +394,9 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, b.resumableState.AppendToState(localMetadataFile, written) } } - logger = logger.With().Logger() - logger.Info(). + log.Info(). + Str("operation", "download_metadata"). + Str("backup", backupName). Str("duration", utils.HumanizeDuration(time.Since(start))). Str("size", utils.FormatBytes(size)). Msg("done") @@ -415,7 +412,6 @@ func (b *Backuper) downloadConfigData(ctx context.Context, remoteBackup storage. 
} func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup storage.Backup, prefix string) (uint64, error) { - logger := log.With().Str("logger", "downloadBackupRelatedDir").Logger() archiveFile := fmt.Sprintf("%s.%s", prefix, b.cfg.GetArchiveExtension()) remoteFile := path.Join(remoteBackup.BackupName, archiveFile) if b.resume { @@ -426,7 +422,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st localDir := path.Join(b.DefaultDataPath, "backup", remoteBackup.BackupName, prefix) remoteFileInfo, err := b.dst.StatFile(ctx, remoteFile) if err != nil { - logger.Debug().Msgf("%s not exists on remote storage, skip download", remoteFile) + log.Debug().Msgf("%s not exists on remote storage, skip download", remoteFile) return 0, nil } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -443,7 +439,6 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata) error { - logger := log.With().Str("logger", "downloadTableData").Logger() dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) s := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) @@ -456,7 +451,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. capacity += len(table.Files[disk]) downloadOffset[disk] = 0 } - logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) breakByErrorArchive: for common.SumMapValuesInt(downloadOffset) < capacity { for disk := range table.Files { @@ -465,7 +460,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } archiveFile := table.Files[disk][downloadOffset[disk]] if err := s.Acquire(dataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore %s archive: %v", archiveFile, err) + log.Error().Msgf("can't acquire semaphore %s archive: %v", archiveFile, err) break breakByErrorArchive } tableLocalDir := b.getLocalBackupDataPathForTable(remoteBackup.BackupName, disk, dbAndTableDir) @@ -473,7 +468,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. tableRemoteFile := path.Join(remoteBackup.BackupName, "shadow", common.TablePathEncode(table.Database), common.TablePathEncode(table.Table), archiveFile) g.Go(func() error { defer s.Release(1) - logger.Debug().Msgf("start download %s", tableRemoteFile) + log.Debug().Msgf("start download %s", tableRemoteFile) if b.resume && b.resumableState.IsAlreadyProcessedBool(tableRemoteFile) { return nil } @@ -487,7 +482,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(tableRemoteFile, 0) } - logger.Debug().Msgf("finish download %s", tableRemoteFile) + log.Debug().Msgf("finish download %s", tableRemoteFile) return nil }) } @@ -497,7 +492,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. 
for disk := range table.Parts { capacity += len(table.Parts[disk]) } - logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) breakByErrorDirectory: for disk, parts := range table.Parts { @@ -513,13 +508,13 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } partRemotePath := path.Join(tableRemotePath, part.Name) if err := s.Acquire(dataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore %s directory: %v", partRemotePath, err) + log.Error().Msgf("can't acquire semaphore %s directory: %v", partRemotePath, err) break breakByErrorDirectory } partLocalPath := path.Join(tableLocalPath, part.Name) g.Go(func() error { defer s.Release(1) - logger.Debug().Msgf("start %s -> %s", partRemotePath, partLocalPath) + log.Debug().Msgf("start %s -> %s", partRemotePath, partLocalPath) if b.resume && b.resumableState.IsAlreadyProcessedBool(partRemotePath) { return nil } @@ -529,7 +524,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(partRemotePath, 0) } - logger.Debug().Msgf("finish %s -> %s", partRemotePath, partLocalPath) + log.Debug().Msgf("finish %s -> %s", partRemotePath, partLocalPath) return nil }) } @@ -550,8 +545,10 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string) error { - logger := log.With().Str("operation", "downloadDiffParts").Logger() - logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("start") + log.Debug(). + Str("operation", "downloadDiffParts"). + Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)). + Msg("start") start := time.Now() downloadedDiffParts := uint32(0) s := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) @@ -577,14 +574,14 @@ breakByError: } if err != nil && os.IsNotExist(err) { if err := s.Acquire(downloadDiffCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during downloadDiffParts: %v", err) + log.Error().Msgf("can't acquire semaphore during downloadDiffParts: %v", err) break breakByError } partForDownload := part diskForDownload := disk downloadDiffGroup.Go(func() error { defer s.Release(1) - tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload, logger) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload) if err != nil { return err } @@ -631,22 +628,26 @@ breakByError: if err := downloadDiffGroup.Wait(); err != nil { return fmt.Errorf("one of downloadDiffParts go-routine return error: %v", err) } - logger.Info().Str("duration", utils.HumanizeDuration(time.Since(start))).Str("diff_parts", strconv.Itoa(int(downloadedDiffParts))).Msg("done") + log.Info(). + Str("operation", "downloadDiffParts"). + Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)). + Str("duration", utils.HumanizeDuration(time.Since(start))). + Str("diff_parts", strconv.Itoa(int(downloadedDiffParts))). 
+ Msg("done") return nil } func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLock *sync.Mutex, diffRemoteFilesCache map[string]*sync.Mutex, tableRemoteFile string, tableLocalDir string) error { - logger := log.With().Str("logger", "downloadDiffRemoteFile").Logger() diffRemoteFilesLock.Lock() namedLock, isCached := diffRemoteFilesCache[tableRemoteFile] if isCached { - logger.Debug().Msgf("wait download begin %s", tableRemoteFile) + log.Debug().Msgf("wait download begin %s", tableRemoteFile) namedLock.Lock() diffRemoteFilesLock.Unlock() namedLock.Unlock() - logger.Debug().Msgf("wait download end %s", tableRemoteFile) + log.Debug().Msgf("wait download end %s", tableRemoteFile) } else { - logger.Debug().Msgf("start download from %s", tableRemoteFile) + log.Debug().Msgf("start download from %s", tableRemoteFile) namedLock = &sync.Mutex{} diffRemoteFilesCache[tableRemoteFile] = namedLock namedLock.Lock() @@ -657,18 +658,18 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo return b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir) }) if err != nil { - logger.Warn().Msgf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + log.Warn().Msgf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } else { // remoteFile could be a directory if err := b.dst.DownloadPath(ctx, 0, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { - logger.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + log.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } namedLock.Unlock() - logger.Debug().Msgf("finish download from %s", tableRemoteFile) + log.Debug().Str("tableRemoteFile", tableRemoteFile).Msgf("finish download") } return nil } @@ -687,21 +688,21 @@ func (b *Backuper) checkNewPath(newPath string, part metadata.Part) error { return nil } -func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part, logger zerolog.Logger) (map[string]string, error) { +func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part) (map[string]string, error) { var requiredTable *metadata.TableMetadata - logger.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Msg("start") requiredBackup, err := b.ReadBackupMetadataRemote(ctx, backup.RequiredBackup) if err != nil { return nil, err } - requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, logger, metadata.TableTitle{Database: table.Database, Table: table.Table}) + requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, metadata.TableTitle{Database: table.Database, Table: table.Table}) if err != nil { - logger.Warn().Msgf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, table.Table) + log.Warn().Msgf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, 
table.Table) return nil, err } // recursive find if part in RequiredBackup also Required - tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, logger, table, requiredTable, part, disk) + tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, table, requiredTable, part, disk) if found { return tableRemoteFiles, nil } @@ -742,18 +743,18 @@ func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadat return nil, fmt.Errorf("%s.%s %s not found on %s and all required backups sequence", table.Database, table.Table, part.Name, requiredBackup.BackupName) } -func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, logger zerolog.Logger, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { - logger.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Msg("start") +func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Msg("start") found := false for _, requiredParts := range requiredTable.Parts { for _, requiredPart := range requiredParts { if requiredPart.Name == part.Name { found = true if requiredPart.Required { - tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part, logger) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part) if err != nil { found = false - logger.Warn().Msgf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) + log.Warn().Msgf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) } return tableRemoteFiles, found, err } @@ -768,8 +769,7 @@ func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metada } func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (map[string]string, error, bool) { - logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}).Logger() - logger.Debug().Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}).Msg("start") tableRemoteFiles := make(map[string]string) // find same disk and part name archive if requiredBackup.DataFormat != "directory" { @@ -788,8 +788,7 @@ func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata } func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartDirectory"}).Logger() - logger.Debug().Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": 
"findDiffOnePartDirectory"}).Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, remoteDisk, part.Name) tableRemoteFile := path.Join(tableRemotePath, "checksums.txt") @@ -797,8 +796,7 @@ func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup } func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}).Logger() - logger.Debug().Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}).Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) remoteExt := config.ArchiveExtensions[requiredBackup.DataFormat] tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, fmt.Sprintf("%s_%s.%s", remoteDisk, common.TablePathEncode(part.Name), remoteExt)) @@ -807,10 +805,9 @@ func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *m } func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metadata.BackupMetadata, tableRemoteFile string, tableRemotePath string, localDisk string, dbAndTableDir string, part metadata.Part) (string, string, error) { - logger := log.With().Str("logger", "findDiffFileExist").Logger() _, err := b.dst.StatFile(ctx, tableRemoteFile) if err != nil { - logger.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist not found") + log.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist not found") return "", "", err } if tableLocalDir, diskExists := b.DiskToPathMap[localDisk]; !diskExists { @@ -821,7 +818,7 @@ func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metada } else { tableLocalDir = path.Join(tableLocalDir, "backup", requiredBackup.BackupName, "shadow", dbAndTableDir, localDisk) } - logger.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist found") + log.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist found") return tableRemotePath, tableLocalDir, nil } } @@ -840,14 +837,13 @@ func (b *Backuper) ReadBackupMetadataRemote(ctx context.Context, backupName stri } func (b *Backuper) makePartHardlinks(exists, new string) error { - logger := log.With().Str("logger", "makePartHardlinks").Logger() ex, err := os.Open(exists) if err != nil { return err } defer func() { if err = ex.Close(); err != nil { - logger.Warn().Msgf("Can't close %s", exists) + log.Warn().Msgf("Can't close %s", exists) } }() files, err := ex.Readdirnames(-1) @@ -855,7 +851,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { return err } if err := os.MkdirAll(new, 0750); err != nil { - logger.Warn().Msgf("MkDirAll(%s) error: %v", new, err) + log.Warn().Msgf("MkDirAll(%s) error: %v", 
new, err) return err } for _, f := range files { @@ -865,7 +861,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { existsFInfo, existsStatErr := os.Stat(existsF) newFInfo, newStatErr := os.Stat(newF) if existsStatErr != nil || newStatErr != nil || !os.SameFile(existsFInfo, newFInfo) { - logger.Warn().Msgf("Link %s -> %s error: %v", newF, existsF, err) + log.Warn().Msgf("Link %s -> %s error: %v", newF, existsF, err) return err } } @@ -877,7 +873,6 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri if b.resume && b.resumableState.IsAlreadyProcessedBool(remoteFile) { return nil } - logger := log.With().Str("logger", "downloadSingleBackupFile").Logger() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { remoteReader, err := b.dst.GetFileReader(ctx, remoteFile) @@ -887,7 +882,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = remoteReader.Close() if err != nil { - logger.Warn().Msgf("can't close remoteReader %s", remoteFile) + log.Warn().Msgf("can't close remoteReader %s", remoteFile) } }() localWriter, err := os.OpenFile(localFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) @@ -898,7 +893,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = localWriter.Close() if err != nil { - logger.Warn().Msgf("can't close localWriter %s", localFile) + log.Warn().Msgf("can't close localWriter %s", localFile) } }() diff --git a/pkg/backup/list.go b/pkg/backup/list.go index d490a090..4caa5dab 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -35,7 +35,6 @@ func (b *Backuper) List(what, format string) error { return nil } func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) error { - logger := log.With().Str("logger", "printBackupsRemote").Logger() switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -73,7 +72,7 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) size = "???" } if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, uploadDate, "remote", required, description); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } default: @@ -83,7 +82,6 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) } func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBackup, format string) error { - logger := log.With().Str("logger", "printBackupsLocal").Logger() switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -122,7 +120,7 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku size = "???" 
} if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, creationDate, "local", required, description); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } } @@ -134,7 +132,6 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku // PrintLocalBackups - print all backups stored locally func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { - logger := log.With().Str("logger", "PrintLocalBackups").Logger() if !b.ch.IsOpen { if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) @@ -144,7 +141,7 @@ func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, _, err := b.GetLocalBackups(ctx, nil) @@ -163,7 +160,6 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } defer b.ch.Close() } - logger := log.With().Str("logger", "GetLocalBackups").Logger() if disks == nil { disks, err = b.ch.GetDisks(ctx) if err != nil { @@ -244,7 +240,7 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) }) } if closeErr := d.Close(); closeErr != nil { - logger.Error().Msgf("can't close %s openError: %v", backupPath, closeErr) + log.Error().Msgf("can't close %s openError: %v", backupPath, closeErr) } } } @@ -262,10 +258,9 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { } defer b.ch.Close() } - logger := log.With().Str("logger", "PrintAllBackups").Logger() defer func() { if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() localBackups, _, err := b.GetLocalBackups(ctx, nil) @@ -273,7 +268,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsLocal(ctx, w, localBackups, format); err != nil { - logger.Warn().Msgf("printBackupsLocal return error: %v", err) + log.Warn().Msgf("printBackupsLocal return error: %v", err) } if b.cfg.General.RemoteStorage != "none" { @@ -282,7 +277,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsRemote(w, remoteBackups, format); err != nil { - logger.Warn().Msgf("printBackupsRemote return error: %v", err) + log.Warn().Msgf("printBackupsRemote return error: %v", err) } } return nil @@ -296,11 +291,10 @@ func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error } defer b.ch.Close() } - logger := log.With().Str("logger", "PrintRemoteBackups").Logger() w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, err := b.GetRemoteBackups(ctx, true) @@ -392,7 +386,6 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { return fmt.Errorf("can't connect to clickhouse: %v", err) } defer b.ch.Close() - logger := log.With().Str("logger", "PrintTables").Logger() 
allTables, err := b.GetTables(ctx, tablePattern) if err != nil { return err @@ -412,16 +405,16 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { } if table.Skip { if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\tskip\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } continue } if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\t\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } return nil } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 207e0405..73233488 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "github.com/Altinity/clickhouse-backup/pkg/status" - "github.com/rs/zerolog" "os" "os/exec" "path" @@ -39,10 +38,6 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "restore", - }).Logger() doRestoreData := !schemaOnly || dataOnly if err := b.ch.Connect(); err != nil { @@ -60,7 +55,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { - logger.Warn().Msgf("%v", err) + log.Warn().Msgf("%v", err) return ErrUnknownClickhouseDataPath } backupMetafileLocalPaths := []string{path.Join(defaultDataPath, "backup", backupName, "metadata.json")} @@ -70,7 +65,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par if err == nil && embeddedBackupPath != "" { backupMetafileLocalPaths = append(backupMetafileLocalPaths, path.Join(embeddedBackupPath, backupName, "metadata.json")) } else if b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "" { - logger.Warn().Msgf("%v", err) + log.Warn().Msgf("%v", err) } else if err != nil { return err } @@ -106,7 +101,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } if len(backupMetadata.Tables) == 0 { - logger.Warn().Msgf("'%s' doesn't contains tables for restore", backupName) + log.Warn().Msgf("'%s' doesn't contains tables for restore", backupName) if (!rbacOnly) && (!configsOnly) { return nil } @@ -129,13 +124,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if needRestart { - logger.Warn().Msgf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) + log.Warn().Msgf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) cmd, err := shellwords.Parse(b.ch.Config.RestartCommand) if err != nil { return err } ctx, cancel := context.WithTimeout(ctx, 180*time.Second) - logger.Info().Msgf("run %s", b.ch.Config.RestartCommand) + log.Info().Msgf("run %s", b.ch.Config.RestartCommand) var out []byte if len(cmd) > 1 { out, err = exec.CommandContext(ctx, cmd[0], cmd[1:]...).CombinedOutput() @@ 
-143,7 +138,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par out, err = exec.CommandContext(ctx, cmd[0]).CombinedOutput() } cancel() - logger.Debug().Msg(string(out)) + log.Debug().Msg(string(out)) return err } @@ -157,7 +152,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } } - logger.Info().Msg("done") + log.Info().Msg("done") return nil } @@ -215,14 +210,13 @@ func (b *Backuper) prepareRestoreDatabaseMapping(databaseMapping []string) error // restoreRBAC - copy backup_name>/rbac folder to access_data_path func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - logger := log.With().Str("logger", "restoreRBAC").Logger() accessPath, err := b.ch.GetAccessManagementPath(ctx, nil) if err != nil { return err } if err = b.restoreBackupRelatedDir(backupName, "access", accessPath, disks); err == nil { markFile := path.Join(accessPath, "need_rebuild_lists.mark") - logger.Info().Msgf("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) + log.Info().Msgf("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) file, err := os.Create(markFile) if err != nil { return err @@ -230,7 +224,7 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c _ = file.Close() _ = filesystemhelper.Chown(markFile, b.ch, disks, false) listFilesPattern := path.Join(accessPath, "*.list") - logger.Info().Msgf("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) + log.Info().Msgf("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) if listFiles, err := filepathx.Glob(listFilesPattern); err != nil { return err } else { @@ -257,7 +251,6 @@ func (b *Backuper) restoreConfigs(backupName string, disks []clickhouse.Disk) er } func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinationDir string, disks []clickhouse.Disk) error { - logger := log.With().Str("logger", "restoreBackupRelatedDir").Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -271,7 +264,7 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat if !info.IsDir() { return fmt.Errorf("%s is not a dir", srcBackupDir) } - logger.Debug().Msgf("copy %s -> %s", srcBackupDir, destinationDir) + log.Debug().Msgf("copy %s -> %s", srcBackupDir, destinationDir) copyOptions := recursiveCopy.Options{OnDirExists: func(src, dest string) recursiveCopy.DirExistsAction { return recursiveCopy.Merge }} @@ -294,11 +287,6 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat // RestoreSchema - restore schemas matched by tablePattern from backupName func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern string, dropTable, ignoreDependencies bool, disks []clickhouse.Disk, isEmbedded bool) error { - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "restore", - }).Logger() - defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -339,14 +327,14 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern s if len(tablesForRestore) == 0 { return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) } - if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, logger); dropErr != nil { + if 
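// A sketch of the RBAC post-restore step above: create need_rebuild_lists.mark
// and collect the *.list files slated for removal, so clickhouse-server rebuilds
// its access lists on the next restart. The access path is a placeholder, and
// stdlib filepath.Glob stands in for the recursive filepathx.Glob the code uses.
package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

func main() {
	accessPath := "/var/lib/clickhouse/access" // placeholder
	markFile := path.Join(accessPath, "need_rebuild_lists.mark")
	if f, err := os.Create(markFile); err == nil {
		_ = f.Close()
	}
	listFiles, err := filepath.Glob(path.Join(accessPath, "*.list"))
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, fileName := range listFiles {
		fmt.Println("would remove", fileName)
	}
}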
dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version); dropErr != nil { return dropErr } var restoreErr error if isEmbedded { restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore) } else { - restoreErr = b.restoreSchemaRegular(tablesForRestore, version, logger) + restoreErr = b.restoreSchemaRegular(tablesForRestore, version) } if restoreErr != nil { return restoreErr @@ -360,7 +348,7 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) } -func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int, logger zerolog.Logger) error { +func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int) error { totalRetries := len(tablesForRestore) restoreRetries := 0 isDatabaseCreated := common.EmptyMap{} @@ -389,7 +377,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i // https://github.com/Altinity/clickhouse-backup/issues/466 if b.cfg.General.RestoreSchemaOnCluster == "" && strings.Contains(schema.Query, "{uuid}") && strings.Contains(schema.Query, "Replicated") { if !strings.Contains(schema.Query, "UUID") { - logger.Warn().Msgf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") + log.Warn().Msgf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") } else { schema.Query = UUIDWithReplicatedMergeTreeRE.ReplaceAllString(schema.Query, "$1$2$3'$4'$5$4$7") } @@ -407,7 +395,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i schema.Database, schema.Table, restoreErr, restoreRetries, ) } else { - logger.Warn().Msgf( + log.Warn().Msgf( "can't create table '%s.%s': %v, will try again", schema.Database, schema.Table, restoreErr, ) } @@ -422,7 +410,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i return nil } -func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int, logger zerolog.Logger) error { +func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int) error { var dropErr error dropRetries := 0 totalRetries := len(tablesForDrop) @@ -463,7 +451,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci schema.Database, schema.Table, dropErr, dropRetries, ) } else { - logger.Warn().Msgf( + log.Warn().Msgf( "can't drop table '%s.%s': %v, will try again", schema.Database, schema.Table, dropErr, ) } @@ -481,10 +469,6 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci // RestoreData - restore data for tables matched by tablePattern from backupName func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePattern string, partitions []string, disks []clickhouse.Disk, isEmbedded bool) error { startRestore := time.Now() - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "restore", - }).Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -517,16 +501,19 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePatt if len(tablesForRestore) == 0 { return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) } - logger.Debug().Msgf("found %d tables with data in backup", len(tablesForRestore)) + log.Debug().Msgf("found %d tables with data in 
backup", len(tablesForRestore)) if isEmbedded { err = b.restoreDataEmbedded(ctx, backupName, tablesForRestore, partitions) } else { - err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, disks, logger) + err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, disks) } if err != nil { return err } - logger.Info().Str("duration", utils.HumanizeDuration(time.Since(startRestore))).Msg("done") + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "restore_data", + }).Str("duration", utils.HumanizeDuration(time.Since(startRestore))).Msg("done") return nil } @@ -534,7 +521,7 @@ func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, t return b.restoreEmbedded(ctx, backupName, false, tablesForRestore, partitions) } -func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk, logger zerolog.Logger) error { +func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk) error { if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreDatabaseMapping(tablePattern) } @@ -542,7 +529,7 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta if err != nil { return err } - disks = b.adjustDisksFromTablesWithSystemDisks(tablesForRestore, diskMap, logger, disks) + disks = b.adjustDisksFromTablesWithSystemDisks(tablesForRestore, diskMap, disks) dstTablesMap := b.prepareDstTablesMap(chTables) missingTables := b.checkMissingTables(tablesForRestore, chTables) @@ -559,7 +546,6 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta tablesForRestore[i].Database = targetDB } } - tableLog := logger.With().Str("table", fmt.Sprintf("%s.%s", dstDatabase, table.Table)).Logger() dstTable, ok := dstTablesMap[metadata.TableTitle{ Database: dstDatabase, Table: table.Table}] @@ -568,41 +554,45 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta } // https://github.com/Altinity/clickhouse-backup/issues/529 if b.cfg.ClickHouse.RestoreAsAttach { - if err = b.restoreDataRegularByAttach(ctx, backupName, table, disks, dstTable, tableLog, tablesForRestore, i); err != nil { + if err = b.restoreDataRegularByAttach(ctx, backupName, table, disks, dstTable, tablesForRestore, i); err != nil { return err } } else { - if err = b.restoreDataRegularByParts(backupName, table, disks, dstTable, tableLog, tablesForRestore, i); err != nil { + if err = b.restoreDataRegularByParts(backupName, table, disks, dstTable, tablesForRestore, i); err != nil { return err } } // https://github.com/Altinity/clickhouse-backup/issues/529 for _, mutation := range table.Mutations { if err := b.ch.ApplyMutation(ctx, tablesForRestore[i], mutation); err != nil { - tableLog.Warn().Msgf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[i].Database, tablesForRestore[i].Table, err) + log.Warn().Msgf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[i].Database, tablesForRestore[i].Table, err) } } - tableLog.Info().Msg("done") + log.Info(). + Str("operation", "restore"). + Str("database", table.Database). + Str("table", table.Table). 
+ Msg("done") } return nil } -func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger, tablesForRestore ListOfTables, i int) error { +func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, tablesForRestore ListOfTables, i int) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, dstTable.DataPaths, b.ch, false); err != nil { return fmt.Errorf("can't copy data to storage '%s.%s': %v", table.Database, table.Table, err) } - logger.Debug().Msg("data to 'storage' copied") + log.Debug().Msg("data to 'storage' copied") if err := b.ch.AttachTable(ctx, tablesForRestore[i]); err != nil { return fmt.Errorf("can't attach table '%s.%s': %v", tablesForRestore[i].Database, tablesForRestore[i].Table, err) } return nil } -func (b *Backuper) restoreDataRegularByParts(backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger, tablesForRestore ListOfTables, i int) error { +func (b *Backuper) restoreDataRegularByParts(backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, tablesForRestore ListOfTables, i int) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, dstTable.DataPaths, b.ch, true); err != nil { return fmt.Errorf("can't copy data to datached '%s.%s': %v", table.Database, table.Table, err) } - logger.Debug().Msg("data to 'detached' copied") + log.Debug().Msg("data to 'detached' copied") if err := b.ch.AttachDataParts(tablesForRestore[i], disks); err != nil { return fmt.Errorf("can't attach data parts for table '%s.%s': %v", tablesForRestore[i].Database, tablesForRestore[i].Table, err) } @@ -643,11 +633,11 @@ func (b *Backuper) prepareDstTablesMap(chTables []clickhouse.Table) map[metadata return dstTablesMap } -func (b *Backuper) adjustDisksFromTablesWithSystemDisks(tablesForRestore ListOfTables, diskMap map[string]string, logger zerolog.Logger, disks []clickhouse.Disk) []clickhouse.Disk { +func (b *Backuper) adjustDisksFromTablesWithSystemDisks(tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk) []clickhouse.Disk { for _, t := range tablesForRestore { for disk := range t.Parts { if _, diskExists := diskMap[disk]; !diskExists { - logger.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will restored to %s", t.Database, t.Table, disk, diskMap["default"]) + log.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will restored to %s", t.Database, t.Table, disk, diskMap["default"]) found := false for _, d := range disks { if d.Name == disk { diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index 26bf188c..093e4aaf 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -49,7 +49,6 @@ func addTableToListIfNotExistsOrEnrichQueryAndParts(tables ListOfTables, table m func getTableListByPatternLocal(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, metadataPath string, tablePattern string, dropTable bool, partitions []string) (ListOfTables, 
error) { result := ListOfTables{} tablePatterns := []string{"*"} - logger := log.With().Str("logger", "getTableListByPatternLocal").Logger() if tablePattern != "" { tablePatterns = strings.Split(tablePattern, ",") } @@ -110,7 +109,7 @@ func getTableListByPatternLocal(ctx context.Context, cfg *config.Config, ch *cli } dataParts, err := os.ReadDir(dataPartsPath) if err != nil { - logger.Warn().Msg(err.Error()) + log.Warn().Msg(err.Error()) } parts := map[string][]metadata.Part{ cfg.ClickHouse.EmbeddedBackupDisk: make([]metadata.Part, len(dataParts)), diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 0168229f..7f7a84ac 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -56,10 +56,6 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if b.cfg.General.RemoteStorage == "custom" { return custom.Upload(ctx, b.cfg, backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly) } - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "upload", - }).Logger() if _, disks, err = b.getLocalBackup(ctx, backupName, nil); err != nil { return fmt.Errorf("can't find local backup: %v", err) } @@ -68,7 +64,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str } defer func() { if err := b.dst.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -81,7 +77,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if !b.resume { return fmt.Errorf("'%s' already exists on remote storage", backupName) } else { - logger.Warn().Msgf("'%s' already exists on remote, will try to resume upload", backupName) + log.Warn().Msgf("'%s' already exists on remote, will try to resume upload", backupName) } } } @@ -125,13 +121,13 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str compressedDataSize := int64(0) metadataSize := int64(0) - logger.Debug().Msgf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) + log.Debug().Msgf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) uploadSemaphore := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency)) uploadGroup, uploadCtx := errgroup.WithContext(ctx) for i, table := range tablesForUpload { if err := uploadSemaphore.Acquire(uploadCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Upload table: %v", err) + log.Error().Msgf("can't acquire semaphore during Upload table: %v", err) break } start := time.Now() @@ -147,9 +143,6 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str idx := i uploadGroup.Go(func() error { defer uploadSemaphore.Release(1) - uploadLogger := logger.With(). - Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)). - Logger() var uploadedBytes int64 if !schemaOnly { var files map[string][]string @@ -166,8 +159,8 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str return err } atomic.AddInt64(&metadataSize, tableMetadataSize) - uploadLogger.Info(). - Str("duration", utils.HumanizeDuration(time.Since(start))). + log.Info(). 
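// A runnable sketch of the upload fan-out above: a weighted semaphore caps
// in-flight tables (standing in for general->upload_concurrency) while errgroup
// propagates the first error and cancels the shared context, which is why a
// failed Acquire breaks the loop.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	tables := []string{"db.t1", "db.t2", "db.t3", "db.t4"}
	uploadSemaphore := semaphore.NewWeighted(2) // placeholder concurrency limit
	uploadGroup, uploadCtx := errgroup.WithContext(context.Background())
	for _, table := range tables {
		table := table
		if err := uploadSemaphore.Acquire(uploadCtx, 1); err != nil {
			fmt.Println("can't acquire semaphore:", err) // context cancelled by an earlier failure
			break
		}
		uploadGroup.Go(func() error {
			defer uploadSemaphore.Release(1)
			fmt.Println("uploading", table)
			return nil
		})
	}
	if err := uploadGroup.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}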
+ Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)).Str("duration", utils.HumanizeDuration(time.Since(start))). Str("size", utils.FormatBytes(uint64(uploadedBytes+tableMetadataSize))). Msg("done") return nil @@ -232,7 +225,11 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if b.resume { b.resumableState.Close() } - logger.Info(). + log.Info(). + Fields(map[string]interface{}{ + "backup": backupName, + "operation": "upload", + }). Str("duration", utils.HumanizeDuration(time.Since(startUpload))). Str("size", utils.FormatBytes(uint64(compressedDataSize)+uint64(metadataSize)+uint64(len(newBackupMetadataBody))+backupMetadata.RBACSize+backupMetadata.ConfigSize)). Msg("done") @@ -248,14 +245,13 @@ func (b *Backuper) uploadSingleBackupFile(ctx context.Context, localFile, remote if b.resume && b.resumableState.IsAlreadyProcessedBool(remoteFile) { return nil } - logger := log.With().Str("logger", "uploadSingleBackupFile").Logger() f, err := os.Open(localFile) if err != nil { return fmt.Errorf("can't open %s: %v", localFile, err) } defer func() { if err := f.Close(); err != nil { - logger.Warn().Msgf("can't close %v: %v", f, err) + log.Warn().Msgf("can't close %v: %v", f, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -350,7 +346,6 @@ func (b *Backuper) getTablesForUploadDiffRemote(ctx context.Context, diffFromRem } func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, diffFrom string, diffFromRemote string) error { - logger := log.With().Str("logger", "validateUploadParams").Logger() if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for upload, change you config or use REMOTE_STORAGE environment variable") } @@ -371,7 +366,7 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, return fmt.Errorf("%s->`compression_format`=%s incompatible with general->upload_by_part=%v", b.cfg.General.RemoteStorage, b.cfg.GetCompressionFormat(), b.cfg.General.UploadByPart) } if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore { - logger.Warn().Msgf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`") + log.Warn().Msgf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`") } if b.cfg.General.RemoteStorage == "custom" && b.resume { return fmt.Errorf("can't resume for `remote_storage: custom`") @@ -597,7 +592,6 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s return processedSize, nil } } - logger := log.With().Str("logger", "uploadTableMetadataEmbedded").Logger() localTableMetaFile := path.Join(b.EmbeddedBackupDataPath, backupName, "metadata", common.TablePathEncode(tableMetadata.Database), fmt.Sprintf("%s.sql", common.TablePathEncode(tableMetadata.Table))) localReader, err := os.Open(localTableMetaFile) if err != nil { @@ -605,7 +599,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s } defer func() { if err := localReader.Close(); err != nil { - logger.Warn().Msgf("can't close %v: %v", localReader, err) + log.Warn().Msgf("can't close %v: %v", localReader, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -626,7 +620,6 @@ func (b *Backuper) 
uploadTableMetadataEmbedded(ctx context.Context, backupName s } func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTable *metadata.TableMetadata, newTable *metadata.TableMetadata, checkLocal bool) { - logger := log.With().Str("logger", "markDuplicatedParts").Logger() for disk, newParts := range newTable.Parts { if _, diskExists := existsTable.Parts[disk]; diskExists { if len(existsTable.Parts[disk]) == 0 { @@ -646,7 +639,7 @@ func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTa newPath := path.Join(b.DiskToPathMap[disk], "backup", backup.BackupName, "shadow", dbAndTablePath, disk, newParts[i].Name) if err := filesystemhelper.IsDuplicatedParts(existsPath, newPath); err != nil { - logger.Debug().Msgf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) + log.Debug().Msgf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) continue } } @@ -706,7 +699,6 @@ func (b *Backuper) splitPartFiles(basePath string, parts []metadata.Part) ([]met } func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - logger := log.With().Str("logger", "splitFilesByName").Logger() result := make([]metadata.SplitPartFiles, 0) for i := range parts { if parts[i].Required { @@ -726,7 +718,7 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - logger.Warn().Msgf("filepath.Walk return error: %v", err) + log.Warn().Msgf("filepath.Walk return error: %v", err) } result = append(result, metadata.SplitPartFiles{ Prefix: parts[i].Name, @@ -737,7 +729,6 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m } func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - logger := log.With().Str("logger", "splitFilesBySize").Logger() var size int64 var files []string maxSize := b.cfg.General.MaxFileSize @@ -770,7 +761,7 @@ func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - logger.Warn().Msgf("filepath.Walk return error: %v", err) + log.Warn().Msgf("filepath.Walk return error: %v", err) } } if len(files) > 0 { diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index e8c51b45..6a13e4f3 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -108,10 +108,6 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } } backupName, err := b.NewBackupWatchName(ctx, backupType) - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "watch", - }).Logger() if err != nil { return err } @@ -130,14 +126,20 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } else { createRemoteErr = b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, false, version, commandId) if createRemoteErr != nil { - logger.Error().Msgf("create_remote %s return error: %v", backupName, createRemoteErr) + log.Error().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "watch", + }).Msgf("create_remote %s return error: %v", backupName, createRemoteErr) createRemoteErrCount += 1 } else { createRemoteErrCount = 0 } deleteLocalErr = b.RemoveBackupLocal(ctx, backupName, nil) if deleteLocalErr != nil { - logger.Error().Msgf("delete local %s return error: %v", backupName, deleteLocalErr) + log.Error().Fields(map[string]interface{}{ + 
"backup": backupName, + "operation": "watch", + }).Msgf("delete local %s return error: %v", backupName, deleteLocalErr) deleteLocalErrCount += 1 } else { deleteLocalErrCount = 0 diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index bcce072d..b1ea8860 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -21,6 +21,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + "github.com/rs/zerolog/log" ) // ClickHouse - provide @@ -28,7 +29,6 @@ type ClickHouse struct { Config *config.ClickHouseConfig conn driver.Conn disks []Disk - Logger zerolog.Logger version int isPartsColumnPresent int8 IsOpen bool @@ -38,7 +38,7 @@ type ClickHouse struct { func (ch *ClickHouse) Connect() error { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Logger.Error().Msgf("close previous connection error: %v", err) + log.Error().Msgf("close previous connection error: %v", err) } } ch.IsOpen = false @@ -78,7 +78,7 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCert != "" || ch.Config.TLSKey != "" { cert, err := tls.LoadX509KeyPair(ch.Config.TLSCert, ch.Config.TLSKey) if err != nil { - ch.Logger.Error().Msgf("tls.LoadX509KeyPair error: %v", err) + log.Error().Msgf("tls.LoadX509KeyPair error: %v", err) return err } tlsConfig.Certificates = []tls.Certificate{cert} @@ -86,12 +86,12 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCa != "" { caCert, err := os.ReadFile(ch.Config.TLSCa) if err != nil { - ch.Logger.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) + log.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) return err } caCertPool := x509.NewCertPool() if caCertPool.AppendCertsFromPEM(caCert) != true { - ch.Logger.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) + log.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) return fmt.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) } tlsConfig.RootCAs = caCertPool @@ -104,18 +104,18 @@ func (ch *ClickHouse) Connect() error { } if ch.conn, err = clickhouse.Open(opt); err != nil { - ch.Logger.Error().Msgf("clickhouse connection: %s, clickhouse.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Error().Msgf("clickhouse connection: %s, clickhouse.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } - logFunc := ch.Logger.Info() + logFunc := log.Info() if !ch.Config.LogSQLQueries { - logFunc = ch.Logger.Debug() + logFunc = log.Debug() } logFunc.Stack().Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v?timeout=%v", ch.Config.Host, ch.Config.Port, ch.Config.Timeout)) err = ch.conn.Ping(context.Background()) if err != nil { - ch.Logger.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } else { ch.IsOpen = true @@ -259,13 +259,13 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro func (ch *ClickHouse) Close() { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Logger.Warn().Msgf("can't close clickhouse connection: %v", err) + log.Warn().Msgf("can't close clickhouse connection: %v", err) } } if 
ch.Config.LogSQLQueries { - ch.Logger.Info().Msg("clickhouse connection closed") + log.Info().Msg("clickhouse connection closed") } else { - ch.Logger.Debug().Msg("clickhouse connection closed") + log.Debug().Msg("clickhouse connection closed") } ch.IsOpen = false } @@ -461,7 +461,7 @@ func (ch *ClickHouse) GetDatabases(ctx context.Context, cfg *config.Config, tabl var result string // 19.4 doesn't have /var/lib/clickhouse/metadata/default.sql if err := ch.SelectSingleRow(ctx, &result, showDatabaseSQL); err != nil { - ch.Logger.Warn().Msgf("can't get create database query: %v", err) + log.Warn().Msgf("can't get create database query: %v", err) allDatabases[i].Query = fmt.Sprintf("CREATE DATABASE `%s` ENGINE = %s", db.Name, db.Engine) } else { // 23.3+ masked secrets https://github.com/Altinity/clickhouse-backup/issues/640 @@ -486,7 +486,7 @@ func (ch *ClickHouse) getTableSizeFromParts(ctx context.Context, table Table) ui } query := fmt.Sprintf("SELECT sum(bytes_on_disk) as size FROM system.parts WHERE active AND database='%s' AND table='%s' GROUP BY database, table", table.Database, table.Name) if err := ch.SelectContext(ctx, &tablesSize, query); err != nil { - ch.Logger.Warn().Msgf("error parsing tablesSize: %v", err) + log.Warn().Msgf("error parsing tablesSize: %v", err) } if len(tablesSize) > 0 { return tablesSize[0].Size @@ -517,7 +517,7 @@ func (ch *ClickHouse) fixVariousVersions(ctx context.Context, t Table, metadataP if strings.Contains(t.CreateTableQuery, "'[HIDDEN]'") { tableSQLPath := path.Join(metadataPath, common.TablePathEncode(t.Database), common.TablePathEncode(t.Name)+".sql") if attachSQL, err := os.ReadFile(tableSQLPath); err != nil { - ch.Logger.Warn().Msgf("can't read %s: %v", tableSQLPath, err) + log.Warn().Msgf("can't read %s: %v", tableSQLPath, err) } else { t.CreateTableQuery = strings.Replace(string(attachSQL), "ATTACH", "CREATE", 1) t.CreateTableQuery = strings.Replace(t.CreateTableQuery, " _ ", " `"+t.Database+"`.`"+t.Name+"` ", 1) @@ -536,7 +536,7 @@ func (ch *ClickHouse) GetVersion(ctx context.Context) (int, error) { var err error query := "SELECT value FROM `system`.`build_options` where name='VERSION_INTEGER'" if err = ch.SelectSingleRow(ctx, &result, query); err != nil { - ch.Logger.Warn().Msgf("can't get ClickHouse version: %v", err) + log.Warn().Msgf("can't get ClickHouse version: %v", err) return 0, nil } ch.version, err = strconv.Atoi(result) @@ -567,7 +567,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name withNameQuery = fmt.Sprintf("WITH NAME '%s'", name) } for _, item := range partitions { - ch.Logger.Debug().Msgf(" partition '%v'", item.PartitionID) + log.Debug().Msgf(" partition '%v'", item.PartitionID) query := fmt.Sprintf( "ALTER TABLE `%v`.`%v` FREEZE PARTITION ID '%v' %s;", table.Database, @@ -585,7 +585,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name } if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Logger.Warn().Msgf("can't freeze partition: %v", err) + log.Warn().Msgf("can't freeze partition: %v", err) } else { return fmt.Errorf("can't freeze partition '%s': %w", item.PartitionID, err) } @@ -604,9 +604,9 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string if strings.HasPrefix(table.Engine, "Replicated") && ch.Config.SyncReplicatedTables { query := fmt.Sprintf("SYSTEM SYNC REPLICA `%s`.`%s`;", 
table.Database, table.Name) if err := ch.QueryContext(ctx, query); err != nil { - ch.Logger.Warn().Msgf("can't sync replica: %v", err) + log.Warn().Msgf("can't sync replica: %v", err) } else { - ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") } } if version < 19001005 || ch.Config.FreezeByPart { @@ -619,7 +619,7 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string query := fmt.Sprintf("ALTER TABLE `%s`.`%s` FREEZE %s;", table.Database, table.Name, withNameQuery) if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Logger.Warn().Msgf("can't freeze table: %v", err) + log.Warn().Msgf("can't freeze table: %v", err) return nil } return fmt.Errorf("can't freeze table: %v", err) @@ -643,7 +643,7 @@ func (ch *ClickHouse) AttachDataParts(table metadata.TableMetadata, disks []Disk if err := ch.Query(query); err != nil { return err } - ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") } } } @@ -656,7 +656,7 @@ var uuidRE = regexp.MustCompile(`UUID '([^']+)'`) // AttachTable - execute ATTACH TABLE command for specific table func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetadata) error { if len(table.Parts) == 0 { - ch.Logger.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) + log.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) return nil } canContinue, err := ch.CheckReplicationInProgress(table) @@ -703,7 +703,7 @@ func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetad return err } - ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") return nil } func (ch *ClickHouse) ShowCreateTable(ctx context.Context, database, name string) string { @@ -834,7 +834,7 @@ func (ch *ClickHouse) CreateTable(table Table, query string, dropTable, ignoreDe if onCluster != "" && distributedRE.MatchString(query) { matches := distributedRE.FindAllStringSubmatch(query, -1) if onCluster != strings.Trim(matches[0][2], "'\" ") { - ch.Logger.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) + log.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) query = distributedRE.ReplaceAllString(query, fmt.Sprintf("${1}(%s,${3})", onCluster)) } } @@ -857,7 +857,7 @@ func (ch *ClickHouse) IsClickhouseShadow(path string) bool { } defer func() { if err := d.Close(); err != nil { - ch.Logger.Warn().Msgf("can't close directory %v", err) + log.Warn().Msgf("can't close directory %v", err) } }() names, err := d.Readdirnames(-1) @@ -910,9 +910,9 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { var logF *zerolog.Event if !ch.Config.LogSQLQueries { - logF = ch.Logger.Debug() + logF = log.Debug() } else { - logF = ch.Logger.Info() + logF = log.Info() } if 
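// A small sketch of the level switch LogQuery uses above: pick the zerolog event
// once from a config flag, then emit through it, so SQL only reaches Info level
// when log_sql_queries is enabled. The flag is a stand-in for ch.Config.LogSQLQueries.
package main

import "github.com/rs/zerolog/log"

func main() {
	logSQLQueries := false // placeholder for ch.Config.LogSQLQueries
	logF := log.Debug()
	if logSQLQueries {
		logF = log.Info()
	}
	logF.Msg("SELECT value FROM `system`.`build_options` WHERE name='VERSION_INTEGER'")
}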
len(args) > 0 { logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) @@ -1049,10 +1049,10 @@ func (ch *ClickHouse) CheckReplicationInProgress(table metadata.TableMetadata) ( return false, fmt.Errorf("invalid result for check exists replicas: %+v", existsReplicas) } if existsReplicas[0].InProgress > 0 { - ch.Logger.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) + log.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) return false, nil } else { - ch.Logger.Info().Msgf("replication_in_progress status = %+v", existsReplicas) + log.Info().Msgf("replication_in_progress status = %+v", existsReplicas) } } return true, nil @@ -1089,7 +1089,7 @@ func (ch *ClickHouse) CheckSystemPartsColumns(ctx context.Context, table *Table) } if len(isPartsColumnsInconsistent) > 0 { for i := range isPartsColumnsInconsistent { - ch.Logger.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) + log.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) } return fmt.Errorf("`%s`.`%s` have inconsistent data types for active data part in system.parts_columns", table.Database, table.Name) } diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index 2321cea5..d71662fd 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -119,12 +119,11 @@ func MkdirAll(path string, ch *clickhouse.ClickHouse, disks []clickhouse.Disk) e // HardlinkBackupPartsToStorage - copy parts for specific table to detached folder func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableMetadata, disks []clickhouse.Disk, tableDataPaths []string, ch *clickhouse.ClickHouse, toDetached bool) error { dstDataPaths := clickhouse.GetDisksByPaths(disks, tableDataPaths) - logger := log.With().Fields(map[string]interface{}{"operation": "HardlinkBackupPartsToStorage"}).Logger() start := time.Now() for _, backupDisk := range disks { backupDiskName := backupDisk.Name if len(backupTable.Parts[backupDiskName]) == 0 { - logger.Debug().Msgf("%s disk have no parts", backupDisk.Name) + log.Debug().Msgf("%s disk have no parts", backupDisk.Name) continue } dstParentDir := dstDataPaths[backupDiskName] @@ -136,9 +135,9 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM info, err := os.Stat(dstPartPath) if err != nil { if os.IsNotExist(err) { - logger.Debug().Msgf("MkDirAll %s", dstPartPath) + log.Debug().Msgf("MkDirAll %s", dstPartPath) if mkdirErr := MkdirAll(dstPartPath, ch, disks); mkdirErr != nil { - logger.Warn().Msgf("error during Mkdir %+v", mkdirErr) + log.Warn().Msgf("error during Mkdir %+v", mkdirErr) } } else { return err @@ -159,14 +158,14 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM filename := strings.Trim(strings.TrimPrefix(filePath, partPath), "/") dstFilePath := filepath.Join(dstPartPath, filename) if info.IsDir() { - logger.Debug().Msgf("MkDir %s", dstFilePath) + log.Debug().Msgf("MkDir %s", dstFilePath) return Mkdir(dstFilePath, ch, disks) } if 
!info.Mode().IsRegular() { - logger.Debug().Msgf("'%s' is not a regular file, skipping.", filePath) + log.Debug().Msgf("'%s' is not a regular file, skipping.", filePath) return nil } - logger.Debug().Msgf("Link %s -> %s", filePath, dstFilePath) + log.Debug().Msgf("Link %s -> %s", filePath, dstFilePath) if err := os.Link(filePath, dstFilePath); err != nil { if !os.IsExist(err) { return fmt.Errorf("failed to create hard link '%s' -> '%s': %w", filePath, dstFilePath, err) @@ -178,7 +177,7 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM } } } - logger.Debug().Str("duration", utils.HumanizeDuration(time.Since(start))).Msg("done") + log.Debug().Str("duration", utils.HumanizeDuration(time.Since(start))).Msg("done") return nil } @@ -194,7 +193,6 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa } func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap) ([]metadata.Part, int64, error) { - logger := log.With().Str("logger", "MoveShadow").Logger() size := int64(0) parts := make([]metadata.Part, 0) err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error { @@ -221,7 +219,7 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E return os.MkdirAll(dstFilePath, 0750) } if !info.Mode().IsRegular() { - logger.Debug().Msgf("'%s' is not a regular file, skipping", filePath) + log.Debug().Msgf("'%s' is not a regular file, skipping", filePath) return nil } size += info.Size() @@ -231,14 +229,13 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E } func IsDuplicatedParts(part1, part2 string) error { - logger := log.With().Str("logger", "IsDuplicatedParts").Logger() p1, err := os.Open(part1) if err != nil { return err } defer func() { if err = p1.Close(); err != nil { - logger.Warn().Msgf("Can't close %s", part1) + log.Warn().Msgf("Can't close %s", part1) } }() p2, err := os.Open(part2) @@ -247,7 +244,7 @@ func IsDuplicatedParts(part1, part2 string) error { } defer func() { if err = p2.Close(); err != nil { - logger.Warn().Msgf("Can't close %s", part2) + log.Warn().Msgf("Can't close %s", part2) } }() pf1, err := p1.Readdirnames(-1) diff --git a/pkg/metadata/load.go b/pkg/metadata/load.go index f996da92..2e18cf4d 100644 --- a/pkg/metadata/load.go +++ b/pkg/metadata/load.go @@ -7,7 +7,6 @@ import ( ) func (tm *TableMetadata) Load(location string) (uint64, error) { - logger := log.With().Str("logger", "metadata.Load").Logger() data, err := os.ReadFile(location) if err != nil { return 0, err @@ -15,6 +14,6 @@ func (tm *TableMetadata) Load(location string) (uint64, error) { if err := json.Unmarshal(data, tm); err != nil { return 0, err } - logger.Debug().Msgf("success %s", location) + log.Debug().Str("operation", "TableMetadata.Load").Msgf("success %s", location) return uint64(len(data)), nil } diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index d69819a5..5d8f52d9 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -9,7 +9,6 @@ import ( "strings" "sync" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -17,7 +16,6 @@ type State struct { stateFile string currentState string params map[string]interface{} - logger zerolog.Logger fp *os.File mx *sync.RWMutex } @@ -27,11 +25,10 @@ func NewState(defaultDiskPath, backupName, command string, params map[string]int stateFile: path.Join(defaultDiskPath, "backup", backupName, fmt.Sprintf("%s.state", command)), currentState: "", mx: &sync.RWMutex{}, 
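// A sketch of the per-file hardlink step above: os.Link is cheap because no data
// is copied, and an already-existing destination is tolerated so re-runs stay
// idempotent. Paths are placeholders; both must live on the same filesystem.
package main

import (
	"fmt"
	"os"
)

func linkFile(filePath, dstFilePath string) error {
	if err := os.Link(filePath, dstFilePath); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create hard link '%s' -> '%s': %w", filePath, dstFilePath, err)
	}
	return nil
}

func main() {
	if err := linkFile("/tmp/part/checksums.txt", "/tmp/detached/checksums.txt"); err != nil {
		fmt.Println(err)
	}
}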
- logger: log.Logger.With().Str("logger", "resumable").Logger(), } fp, err := os.OpenFile(s.stateFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) if err != nil { - s.logger.Warn().Msgf("can't open %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't open %s error: %v", s.stateFile, err) } s.fp = fp s.LoadState() @@ -57,7 +54,7 @@ func (s *State) LoadParams() { //size 0 during write lines[0] = strings.TrimSuffix(lines[0], ":0") if err := json.Unmarshal([]byte(lines[0]), &s.params); err != nil { - s.logger.Error().Msgf("can't parse state file line 0 as []interface{}: %s", lines[0]) + log.Error().Msgf("can't parse state file line 0 as []interface{}: %s", lines[0]) } } @@ -69,9 +66,9 @@ func (s *State) LoadState() { } else { s.currentState = "" if !os.IsNotExist(err) { - s.logger.Warn().Msgf("can't read %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't read %s error: %v", s.stateFile, err) } else { - s.logger.Warn().Msgf("%s empty, will continue from scratch error: %v", s.stateFile, err) + log.Warn().Msgf("%s empty, will continue from scratch error: %v", s.stateFile, err) } } s.mx.Unlock() @@ -83,11 +80,11 @@ func (s *State) AppendToState(path string, size int64) { if s.fp != nil { _, err := s.fp.WriteString(path + "\n") if err != nil { - s.logger.Warn().Msgf("can't write %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't write %s error: %v", s.stateFile, err) } err = s.fp.Sync() if err != nil { - s.logger.Warn().Msgf("can't sync %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't sync %s error: %v", s.stateFile, err) } } s.currentState += path + "\n" @@ -105,12 +102,12 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { res := strings.Index(s.currentState, path+":") if res >= 0 { // s.logger is non thread-safe https://github.com/rs/zerolog/issues/242 - s.logger.Info().Msgf("%s already processed", path) + log.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] size, err = strconv.ParseInt(sSize, 10, 64) if err != nil { - s.logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) + log.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) } } s.mx.RUnlock() diff --git a/pkg/server/metrics/metrics.go b/pkg/server/metrics/metrics.go index 062656a6..ecbca8bf 100644 --- a/pkg/server/metrics/metrics.go +++ b/pkg/server/metrics/metrics.go @@ -2,7 +2,6 @@ package metrics import ( "fmt" - "github.com/rs/zerolog" "time" "github.com/prometheus/client_golang/prometheus" @@ -34,7 +33,6 @@ type APIMetrics struct { NumberBackupsLocalExpected prometheus.Gauge SubCommands map[string][]string - logger zerolog.Logger } func NewAPIMetrics() *APIMetrics { @@ -43,7 +41,6 @@ func NewAPIMetrics() *APIMetrics { "create_remote": {"create", "upload"}, "restore_remote": {"download", "restore"}, }, - logger: log.With().Str("logger", "metrics").Logger(), } return metrics } @@ -175,7 +172,7 @@ func (m *APIMetrics) Start(command string, startTime time.Time) { } } } else { - m.logger.Warn().Msgf("%s not found in LastStart metrics", command) + log.Warn().Msgf("%s not found in LastStart metrics", command) } } func (m *APIMetrics) Finish(command string, startTime time.Time) { @@ -191,19 +188,19 @@ func (m *APIMetrics) Finish(command string, startTime time.Time) { } } } else { - m.logger.Warn().Msgf("%s not found in LastFinish", command) + log.Warn().Msgf("%s not found in LastFinish", command) } } func (m *APIMetrics) Success(command string) { if _, exists := 
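// A sketch of the resumable state file handled above: an append-only "path:size"
// log, Sync'ed after every line so an interrupted upload or download can resume
// from the last durable entry. The state path and entry are placeholders.
package main

import (
	"fmt"
	"os"
)

func main() {
	stateFile := "/tmp/upload.state"
	fp, err := os.OpenFile(stateFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("can't open %s error: %v\n", stateFile, err)
		return
	}
	defer func() { _ = fp.Close() }()
	if _, err = fp.WriteString("shadow/default/t1/part_1_1_0:1024\n"); err == nil {
		_ = fp.Sync()
	}
}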
m.SuccessfulCounter[command]; exists { m.SuccessfulCounter[command].Inc() } else { - m.logger.Warn().Msgf("%s not found in SuccessfulCounter metrics", command) + log.Warn().Msgf("%s not found in SuccessfulCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(1) } else { - m.logger.Warn().Msgf("%s not found in LastStatus metrics", command) + log.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -211,12 +208,12 @@ func (m *APIMetrics) Failure(command string) { if _, exists := m.FailedCounter[command]; exists { m.FailedCounter[command].Inc() } else { - m.logger.Warn().Msgf("%s not found in FailedCounter metrics", command) + log.Warn().Msgf("%s not found in FailedCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(0) } else { - m.logger.Warn().Msgf("%s not found in LastStatus metrics", command) + log.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -226,7 +223,7 @@ func (m *APIMetrics) ExecuteWithMetrics(command string, errCounter int, f func() err := f() m.Finish(command, startTime) if err != nil { - m.logger.Error().Msgf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) + log.Error().Msgf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) errCounter += 1 m.Failure(command) } else { diff --git a/pkg/server/server.go b/pkg/server/server.go index bf455402..0e4d1d16 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/rs/zerolog" "io" "net/http" "net/http/pprof" @@ -44,7 +43,6 @@ type APIServer struct { server *http.Server restart chan struct{} metrics *metrics.APIMetrics - log zerolog.Logger routes []string clickhouseBackupVersion string } @@ -55,25 +53,23 @@ var ( // Run - expose CLI commands as REST API func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBackupVersion string) error { - logger := log.With().Str("logger", "server.Run").Logger() var ( cfg *config.Config err error ) - logger.Debug().Msg("Wait for ClickHouse") + log.Debug().Msg("Wait for ClickHouse") for { cfg, err = config.LoadConfig(configPath) if err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() time.Sleep(5 * time.Second) continue } ch := clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() time.Sleep(5 * time.Second) continue } @@ -88,16 +84,15 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack restart: make(chan struct{}), clickhouseBackupVersion: clickhouseBackupVersion, metrics: metrics.NewAPIMetrics(), - log: log.With().Str("logger", "server").Logger(), } if cfg.API.CreateIntegrationTables { if err := api.CreateIntegrationTables(); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() } } api.metrics.RegisterMetrics() - logger.Info().Msgf("Starting API server on %s", api.config.API.ListenAddr) + log.Info().Msgf("Starting API server on %s", api.config.API.ListenAddr) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, os.Interrupt, syscall.SIGTERM) sighup := make(chan os.Signal, 1) @@ -108,14 +103,14 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack if api.config.API.CompleteResumableAfterRestart { go func() { if err := api.ResumeOperationsAfterRestart(); err != nil { - 
logger.Error().Msgf("ResumeOperationsAfterRestart return error: %v", err) + log.Error().Msgf("ResumeOperationsAfterRestart return error: %v", err) } }() } go func() { if err := api.UpdateBackupMetrics(context.Background(), false); err != nil { - logger.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() @@ -127,18 +122,18 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack select { case <-api.restart: if err := api.Restart(); err != nil { - logger.Error().Msgf("Failed to restarting API server: %v", err) + log.Error().Msgf("Failed to restarting API server: %v", err) continue } - logger.Info().Msgf("Reloaded by HTTP") + log.Info().Msgf("Reloaded by HTTP") case <-sighup: if err := api.Restart(); err != nil { - logger.Error().Msgf("Failed to restarting API server: %v", err) + log.Error().Msgf("Failed to restarting API server: %v", err) continue } - logger.Info().Msg("Reloaded by SIGHUP") + log.Info().Msg("Reloaded by SIGHUP") case <-sigterm: - logger.Info().Msg("Stopping API server") + log.Info().Msg("Stopping API server") return api.Stop() } } @@ -149,7 +144,7 @@ func (api *APIServer) GetMetrics() *metrics.APIMetrics { } func (api *APIServer) RunWatch(cliCtx *cli.Context) { - api.log.Info().Msg("Starting API Server in watch mode") + log.Info().Msg("Starting API Server in watch mode") b := backup.NewBackuper(api.config) commandId, _ := status.Current.Start("watch") err := b.Watch( @@ -167,7 +162,6 @@ func (api *APIServer) Stop() error { } func (api *APIServer) Restart() error { - logger := log.With().Str("logger", "server.Restart").Logger() _, err := api.ReloadConfig(nil, "restart") if err != nil { return err @@ -183,9 +177,9 @@ func (api *APIServer) Restart() error { err = api.server.ListenAndServeTLS(api.config.API.CertificateFile, api.config.API.PrivateKeyFile) if err != nil { if err == http.ErrServerClosed { - logger.Warn().Msgf("ListenAndServeTLS get signal: %s", err.Error()) + log.Warn().Msgf("ListenAndServeTLS get signal: %s", err.Error()) } else { - logger.Fatal().Stack().Msgf("ListenAndServeTLS error: %s", err.Error()) + log.Fatal().Stack().Msgf("ListenAndServeTLS error: %s", err.Error()) } } }() @@ -194,9 +188,9 @@ func (api *APIServer) Restart() error { go func() { if err = api.server.ListenAndServe(); err != nil { if err == http.ErrServerClosed { - logger.Warn().Msgf("ListenAndServe get signal: %s", err.Error()) + log.Warn().Msgf("ListenAndServe get signal: %s", err.Error()) } else { - logger.Fatal().Stack().Msgf("ListenAndServe error: %s", err.Error()) + log.Fatal().Stack().Msgf("ListenAndServe error: %s", err.Error()) } } }() @@ -206,7 +200,6 @@ func (api *APIServer) Restart() error { // registerHTTPHandlers - resister API routes func (api *APIServer) registerHTTPHandlers() *http.Server { - logger := log.With().Str("logger", "registerHTTPHandlers").Logger() r := mux.NewRouter() r.Use(api.basicAuthMiddleware) r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -246,7 +239,7 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { routes = append(routes, t) return nil }); err != nil { - logger.Error().Msgf("mux.Router.Walk return error: %v", err) + log.Error().Msgf("mux.Router.Walk return error: %v", err) return nil } @@ -262,9 +255,9 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { if r.URL.Path != "/metrics" { - api.log.Info().Msgf("API call %s %s", r.Method, r.URL.Path) + log.Info().Msgf("API call %s %s", r.Method, r.URL.Path) } else { - api.log.Debug().Msgf("API call %s %s", r.Method, r.URL.Path) + log.Debug().Msgf("API call %s %s", r.Method, r.URL.Path) } user, pass, _ := r.BasicAuth() query := r.URL.Query() @@ -275,11 +268,11 @@ func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { pass = p[0] } if (user != api.config.API.Username) || (pass != api.config.API.Password) { - api.log.Warn().Msgf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) + log.Warn().Msgf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) w.Header().Set("WWW-Authenticate", "Basic realm=\"Provide username and password\"") w.WriteHeader(http.StatusUnauthorized) if _, err := w.Write([]byte("401 Unauthorized\n")); err != nil { - api.log.Error().Msgf("RequestWriter.Write return error: %v", err) + log.Error().Msgf("RequestWriter.Write return error: %v", err) } return } @@ -316,7 +309,7 @@ func (api *APIServer) actions(w http.ResponseWriter, r *http.Request) { api.writeError(w, http.StatusBadRequest, string(line), err) return } - api.log.Info().Msgf("/backup/actions call: %s", row.Command) + log.Info().Msgf("/backup/actions call: %s", row.Command) args, err := shlex.Split(row.Command) if err != nil { api.writeError(w, http.StatusBadRequest, "", err) @@ -375,14 +368,14 @@ func (api *APIServer) actionsDeleteHandler(row status.ActionRow, args []string, } go func() { if err := api.UpdateBackupMetrics(context.Background(), args[1] == "local"); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() actionsResults = append(actionsResults, actionsResultsRow{ Status: "success", Operation: row.Command, }) - api.log.Info().Msg("DELETED") + log.Info().Msg("DELETED") return actionsResults, nil } @@ -397,12 +390,12 @@ func (api *APIServer) actionsAsyncCommandsHandler(command string, args []string, }) status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("API /backup/actions error: %v", err) + log.Error().Msgf("API /backup/actions error: %v", err) return } go func() { if err := api.UpdateBackupMetrics(context.Background(), command == "create" || command == "restore"); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() }() @@ -433,7 +426,7 @@ func (api *APIServer) actionsKillHandler(row status.ActionRow, args []string, ac func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row status.ActionRow, command string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } commandId, ctx := status.Current.Start(command) @@ -445,14 +438,14 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Error().Msgf("Clean remote broken error: %v", err) + log.Error().Msgf("Clean remote broken error: %v", err) status.Current.Stop(commandId, err) return actionsResults, err } - api.log.Info().Msg("CLEANED") + log.Info().Msg("CLEANED") metricsErr := api.UpdateBackupMetrics(ctx, false) if metricsErr != nil { - 
-				api.log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr)
+				log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr)
 			}
 			status.Current.Stop(commandId, nil)
 			actionsResults = append(actionsResults, actionsResultsRow{
@@ -464,7 +457,7 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row
 func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.ActionRow, args []string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) {
 	if (!api.config.API.AllowParallel && status.Current.InProgress()) || status.Current.CheckCommandInProgress(row.Command) {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		return actionsResults, ErrAPILocked
 	}
 	cfg, err := api.ReloadConfig(w, "watch")
@@ -542,7 +535,7 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti
 		err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx)
 		defer status.Current.Stop(commandId, err)
 		if err != nil {
-			api.log.Error().Msgf("Watch error: %v", err)
+			log.Error().Msgf("Watch error: %v", err)
 			return
 		}
 	}()
@@ -565,7 +558,7 @@ func (api *APIServer) actionsLog(w http.ResponseWriter, r *http.Request) {
 	if q.Get("last") != "" {
 		last, err = strconv.ParseInt(q.Get("last"), 10, 16)
 		if err != nil {
-			api.log.Warn().Err(err).Send()
+			log.Warn().Err(err).Send()
 			api.writeError(w, http.StatusInternalServerError, "actions", err)
 			return
 		}
 	}
@@ -780,7 +773,7 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) {
 // httpCreateHandler - create a backup
 func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) {
 	if !api.config.API.AllowParallel && status.Current.InProgress() {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		api.writeError(w, http.StatusLocked, "create", ErrAPILocked)
 		return
 	}
@@ -836,7 +829,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request)

 	callback, err := parseCallback(query)
 	if err != nil {
-		api.log.Error().Err(err).Send()
+		log.Error().Err(err).Send()
 		api.writeError(w, http.StatusBadRequest, "create", err)
 		return
 	}
@@ -848,13 +841,13 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request)
 			return b.CreateBackup(backupName, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, checkPartsColumns, api.clickhouseBackupVersion, commandId)
 		})
 		if err != nil {
-			api.log.Error().Msgf("API /backup/create error: %v", err)
+			log.Error().Msgf("API /backup/create error: %v", err)
 			status.Current.Stop(commandId, err)
 			api.errorCallback(r.Context(), err, callback)
 			return
 		}
 		if err := api.UpdateBackupMetrics(ctx, true); err != nil {
-			api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
+			log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
 			status.Current.Stop(commandId, err)
 			api.errorCallback(r.Context(), err, callback)
 			return
@@ -876,7 +869,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request)
 // httpWatchHandler - run watch command go routine, can't run the same watch command twice
 func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) {
 	if !api.config.API.AllowParallel && status.Current.InProgress() {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		api.writeError(w, http.StatusLocked, "watch", ErrAPILocked)
 		return
 	}
@@ -941,7 +934,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	if status.Current.CheckCommandInProgress(fullCommand) {
-		api.log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked)
+		log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked)
 		api.writeError(w, http.StatusLocked, "watch", ErrAPILocked)
 		return
 	}
@@ -952,7 +945,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) {
 		err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx)
 		defer status.Current.Stop(commandId, err)
 		if err != nil {
-			api.log.Error().Msgf("Watch error: %v", err)
+			log.Error().Msgf("Watch error: %v", err)
 			return
 		}
 	}()
@@ -976,7 +969,7 @@ func (api *APIServer) httpCleanHandler(w http.ResponseWriter, _ *http.Request) {
 	err = b.Clean(ctx)
 	defer status.Current.Stop(commandId, err)
 	if err != nil {
-		api.log.Error().Msgf("Clean error: %v", err)
+		log.Error().Msgf("Clean error: %v", err)
 		api.writeError(w, http.StatusInternalServerError, "clean", err)
 		return
 	}
@@ -1001,14 +994,14 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt
 	b := backup.NewBackuper(cfg)
 	err = b.CleanRemoteBroken(commandId)
 	if err != nil {
-		api.log.Error().Msgf("Clean remote broken error: %v", err)
+		log.Error().Msgf("Clean remote broken error: %v", err)
 		api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err)
 		return
 	}
 	err = api.UpdateBackupMetrics(ctx, false)
 	if err != nil {
-		api.log.Error().Msgf("Clean remote broken error: %v", err)
+		log.Error().Msgf("Clean remote broken error: %v", err)
 		api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err)
 		return
 	}
@@ -1025,7 +1018,7 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt
 // httpUploadHandler - upload a backup to remote storage
 func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) {
 	if !api.config.API.AllowParallel && status.Current.InProgress() {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		api.writeError(w, http.StatusLocked, "upload", ErrAPILocked)
 		return
 	}
@@ -1073,7 +1066,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request)

 	callback, err := parseCallback(query)
 	if err != nil {
-		api.log.Error().Err(err).Send()
+		log.Error().Err(err).Send()
 		api.writeError(w, http.StatusBadRequest, "upload", err)
 		return
 	}
@@ -1085,13 +1078,13 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request)
 			return b.Upload(name, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, resume, commandId)
 		})
 		if err != nil {
-			api.log.Error().Msgf("Upload error: %v", err)
+			log.Error().Msgf("Upload error: %v", err)
 			status.Current.Stop(commandId, err)
 			api.errorCallback(r.Context(), err, callback)
 			return
 		}
 		if err := api.UpdateBackupMetrics(ctx, false); err != nil {
-			api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
+			log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
 			status.Current.Stop(commandId, err)
 			api.errorCallback(r.Context(), err, callback)
 			return
@@ -1119,7 +1112,7 @@ var databaseMappingRE = regexp.MustCompile(`[\w+]:[\w+]`)
 // httpRestoreHandler - restore a backup from local storage
 func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) {
 	if !api.config.API.AllowParallel && status.Current.InProgress() {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		api.writeError(w, http.StatusLocked, "restore", ErrAPILocked)
 		return
 	}
@@ -1197,7 +1190,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request)

 	callback, err := parseCallback(query)
 	if err != nil {
-		api.log.Error().Err(err).Send()
+		log.Error().Err(err).Send()
 		api.writeError(w, http.StatusBadRequest, "restore", err)
 		return
 	}
@@ -1210,7 +1203,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request)
 	})
 	status.Current.Stop(commandId, err)
 	if err != nil {
-		api.log.Error().Msgf("API /backup/restore error: %v", err)
+		log.Error().Msgf("API /backup/restore error: %v", err)
 		api.errorCallback(r.Context(), err, callback)
 		return
 	}
@@ -1230,7 +1223,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request)
 // httpDownloadHandler - download a backup from remote to local storage
 func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request) {
 	if !api.config.API.AllowParallel && status.Current.InProgress() {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		api.writeError(w, http.StatusLocked, "download", ErrAPILocked)
 		return
 	}
@@ -1268,7 +1261,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request

 	callback, err := parseCallback(query)
 	if err != nil {
-		api.log.Error().Err(err).Send()
+		log.Error().Err(err).Send()
 		api.writeError(w, http.StatusBadRequest, "download", err)
 		return
 	}
@@ -1280,13 +1273,13 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request
 			return b.Download(name, tablePattern, partitionsToBackup, schemaOnly, resume, commandId)
 		})
 		if err != nil {
-			api.log.Error().Msgf("API /backup/download error: %v", err)
+			log.Error().Msgf("API /backup/download error: %v", err)
 			status.Current.Stop(commandId, err)
 			api.errorCallback(r.Context(), err, callback)
 			return
 		}
 		if err := api.UpdateBackupMetrics(ctx, true); err != nil {
-			api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
+			log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
 			status.Current.Stop(commandId, err)
 			api.errorCallback(r.Context(), err, callback)
 			return
@@ -1308,7 +1301,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request
 // httpDeleteHandler - delete a backup from local or remote storage
 func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) {
 	if !api.config.API.AllowParallel && status.Current.InProgress() {
-		api.log.Warn().Err(ErrAPILocked).Send()
+		log.Warn().Err(ErrAPILocked).Send()
 		api.writeError(w, http.StatusLocked, "delete", ErrAPILocked)
 		return
 	}
@@ -1330,13 +1323,13 @@ func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request)
 	}
 	status.Current.Stop(commandId, err)
 	if err != nil {
-		api.log.Error().Msgf("delete backup error: %v", err)
+		log.Error().Msgf("delete backup error: %v", err)
 		api.writeError(w, http.StatusInternalServerError, "delete", err)
 		return
 	}
 	go func() {
 		if err := api.UpdateBackupMetrics(context.Background(), vars["where"] == "local"); err != nil {
-			api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
+			log.Error().Msgf("UpdateBackupMetrics return error: %v", err)
 		}
 	}()
 	api.sendJSONEachRow(w, http.StatusOK, struct {
@@ -1368,7 +1361,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e
 	numberBackupsRemote := 0
 	numberBackupsRemoteBroken := 0
-	api.log.Info().Msgf("Update backup metrics start (onlyLocal=%v)", onlyLocal)
+	log.Info().Msgf("Update backup metrics start (onlyLocal=%v)", onlyLocal)
 	if !api.config.API.EnableMetrics {
 		return nil
 	}
@@ -1430,7 +1423,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e
 			api.metrics.LastFinish["create_remote"].Set(float64(lastBackupUpload.Unix()))
 		}
 	}
-	api.log.Info().Fields(map[string]interface{}{
+	log.Info().Fields(map[string]interface{}{
 		"duration":               utils.HumanizeDuration(time.Since(startTime)),
 		"LastBackupCreateLocal":  lastBackupCreateLocal,
 		"LastBackupCreateRemote": lastBackupCreateRemote,
@@ -1469,10 +1462,9 @@ func (api *APIServer) registerMetricsHandlers(r *mux.Router, enableMetrics bool,
 }
 func (api *APIServer) CreateIntegrationTables() error {
-	api.log.Info().Msgf("Create integration tables")
+	log.Info().Msgf("Create integration tables")
 	ch := &clickhouse.ClickHouse{
 		Config: &api.config.ClickHouse,
-		Logger: log.With().Str("logger", "clickhouse").Logger(),
 	}
 	if err := ch.Connect(); err != nil {
 		return fmt.Errorf("can't connect to clickhouse: %w", err)
 	}
@@ -1516,14 +1508,13 @@ func (api *APIServer) CreateIntegrationTables() error {
 func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*config.Config, error) {
 	cfg, err := config.LoadConfig(api.configPath)
 	if err != nil {
-		api.log.Error().Msgf("config.LoadConfig(%s) return error: %v", api.configPath, err)
+		log.Error().Msgf("config.LoadConfig(%s) return error: %v", api.configPath, err)
 		if w != nil {
 			api.writeError(w, http.StatusInternalServerError, command, err)
 		}
 		return nil, err
 	}
 	api.config = cfg
-	api.log = log.With().Str("logger", "server").Logger()
 	api.metrics.NumberBackupsRemoteExpected.Set(float64(cfg.General.BackupsToKeepRemote))
 	api.metrics.NumberBackupsLocalExpected.Set(float64(cfg.General.BackupsToKeepLocal))
 	return cfg, nil
@@ -1532,14 +1523,13 @@ func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*conf
 func (api *APIServer) ResumeOperationsAfterRestart() error {
 	ch := clickhouse.ClickHouse{
 		Config: &api.config.ClickHouse,
-		Logger: log.With().Str("logger", "clickhouse").Logger(),
 	}
 	if err := ch.Connect(); err != nil {
 		return err
 	}
 	defer func() {
 		if err := ch.GetConn().Close(); err != nil {
-			api.log.Error().Msgf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err)
+			log.Error().Msgf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err)
 		}
 	}()
 	disks, err := ch.GetDisks(context.Background())
@@ -1598,7 +1588,7 @@ func (api *APIServer) ResumeOperationsAfterRestart() error {
 			}
 			args = append(args, "--resumable=1", backupName)
 			fullCommand := strings.Join(args, " ")
-			api.log.Info().Str("operation", "ResumeOperationsAfterRestart").Send()
+			log.Info().Str("operation", "ResumeOperationsAfterRestart").Send()
 			commandId, _ := status.Current.Start(fullCommand)
 			err, _ = api.metrics.ExecuteWithMetrics(command, 0, func() error {
 				return api.cliApp.Run(append([]string{"clickhouse-backup", "-c", api.configPath, "--command-id", strconv.FormatInt(int64(commandId), 10)}, args...))
diff --git a/pkg/server/utils.go b/pkg/server/utils.go
index 489b9f38..caa1d82b 100644
--- a/pkg/server/utils.go
+++ b/pkg/server/utils.go
@@ -4,18 +4,19 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"github.com/rs/zerolog/log"
 	"net/http"
 	"reflect"
 )

 func (api *APIServer) flushOutput(w http.ResponseWriter, out string) {
 	if _, err := fmt.Fprintln(w, out); err != nil {
-		api.log.Warn().Msgf("can't write to http.ResponseWriter: %v", err)
+		log.Warn().Msgf("can't write to http.ResponseWriter: %v", err)
 	}
 }

 func (api *APIServer) writeError(w http.ResponseWriter, statusCode int, operation string, err error) {
-	api.log.Error().Msgf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err)
+	log.Error().Msgf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err)
 	w.WriteHeader(statusCode)
 	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
 	w.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate")
@@ -45,7 +46,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i
 			api.flushOutput(w, string(out))
 		} else {
 			api.flushOutput(w, err.Error())
-			api.log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err)
+			log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err)
 		}
 	}
 	default:
@@ -53,7 +54,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i
 			api.flushOutput(w, string(out))
 		} else {
 			api.flushOutput(w, err.Error())
-			api.log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err)
+			log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err)
 		}
 	}
 }
@@ -71,7 +72,7 @@ func (api *APIServer) errorCallback(ctx context.Context, err error, callback cal
 		Error: err.Error(),
 	}
 	for _, e := range callback(ctx, payload) {
-		api.log.Error().Err(e).Send()
+		log.Error().Err(e).Send()
 	}
 }

@@ -82,6 +83,6 @@ func (api *APIServer) successCallback(ctx context.Context, callback callbackFn)
 		Error: "",
 	}
 	for _, e := range callback(ctx, payload) {
-		api.log.Error().Err(e).Send()
+		log.Error().Err(e).Send()
 	}
 }
diff --git a/pkg/status/status.go b/pkg/status/status.go
index 366632ad..f4c0a342 100644
--- a/pkg/status/status.go
+++ b/pkg/status/status.go
@@ -3,13 +3,12 @@ package status
 import (
 	"context"
 	"fmt"
-	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
 	"strings"
 	"sync"
 	"time"

 	"github.com/Altinity/clickhouse-backup/pkg/common"
-	"github.com/rs/zerolog/log"
 )

 const (
@@ -19,15 +18,12 @@ const (
 	ErrorStatus = "error"
 )

-var Current = &AsyncStatus{
-	logger: log.With().Str("logger", "status").Logger(),
-}
+var Current = &AsyncStatus{}

 const NotFromAPI = int(-1)

 type AsyncStatus struct {
 	commands []ActionRow
-	logger   zerolog.Logger
 	sync.RWMutex
 }

@@ -59,7 +55,7 @@ func (status *AsyncStatus) Start(command string) (int, context.Context) {
 		Cancel:  cancel,
 	})
 	lastCommandId := len(status.commands) - 1
-	status.logger.Debug().Msgf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId])
+	log.Debug().Msgf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId])
 	return lastCommandId, ctx
 }

@@ -79,10 +75,10 @@ func (status *AsyncStatus) InProgress() bool {
 	defer status.RUnlock()
 	n := len(status.commands) - 1
 	if n < 0 {
-		status.logger.Debug().Msgf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands))
+		log.Debug().Msgf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands))
 		return false
 	}
-	status.logger.Debug().Msgf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus)
+	log.Debug().Msgf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus)
 	return status.commands[n].Status == InProgressStatus
 }

@@ -118,7 +114,7 @@ func (status *AsyncStatus) Stop(commandId int, err error) {
 	status.commands[commandId].Finish = time.Now().Format(common.TimeFormat)
 	status.commands[commandId].Ctx = nil
 	status.commands[commandId].Cancel = nil
-	status.logger.Debug().Msgf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId])
+	log.Debug().Msgf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId])
 }

 func (status *AsyncStatus) Cancel(command string, err error) error {
@@ -126,7 +122,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error {
 	defer status.Unlock()
 	if len(status.commands) == 0 {
 		err = fmt.Errorf("empty command list")
-		status.logger.Warn().Err(err).Send()
+		log.Warn().Err(err).Send()
 		return err
 	}
 	commandId := -1
@@ -147,11 +143,11 @@ func (status *AsyncStatus) Cancel(command string, err error) error {
 	}
 	if commandId == -1 {
 		err = fmt.Errorf("command `%s` not found", command)
-		status.logger.Warn().Err(err).Send()
+		log.Warn().Err(err).Send()
 		return err
 	}
 	if status.commands[commandId].Status != InProgressStatus {
-		status.logger.Warn().Msgf("found `%s` with status=%s", command, status.commands[commandId].Status)
+		log.Warn().Msgf("found `%s` with status=%s", command, status.commands[commandId].Status)
 	}
 	if status.commands[commandId].Ctx != nil {
 		status.commands[commandId].Cancel()
@@ -161,7 +157,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error {
 	status.commands[commandId].Error = err.Error()
 	status.commands[commandId].Status = CancelStatus
 	status.commands[commandId].Finish = time.Now().Format(common.TimeFormat)
-	status.logger.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId])
+	log.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId])
 	return nil
 }

@@ -177,7 +173,7 @@ func (status *AsyncStatus) CancelAll(cancelMsg string) {
 		status.commands[commandId].Status = CancelStatus
 		status.commands[commandId].Error = cancelMsg
 		status.commands[commandId].Finish = time.Now().Format(common.TimeFormat)
-		status.logger.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId])
+		log.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId])
 	}
 }
diff --git a/pkg/storage/ftp.go b/pkg/storage/ftp.go
index ee9dd14f..b0edd1f5 100644
--- a/pkg/storage/ftp.go
+++ b/pkg/storage/ftp.go
@@ -14,13 +14,12 @@ import (
 	"github.com/Altinity/clickhouse-backup/pkg/config"
 	"github.com/jlaffaye/ftp"
 	"github.com/jolestar/go-commons-pool/v2"
-	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
 )

 type FTP struct {
 	clients       *pool.ObjectPool
 	Config        *config.FTPConfig
-	Logger        zerolog.Logger
 	dirCache      map[string]bool
 	dirCacheMutex sync.RWMutex
 }
@@ -64,21 +63,21 @@ func (f *FTP) Close(ctx context.Context) error {

 // getConnectionFromPool *ftp.ServerConn is not thread-safe, so we need implements connection pool
 func (f *FTP) getConnectionFromPool(ctx context.Context, where string) (*ftp.ServerConn, error) {
-	f.Logger.Debug().Msgf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle())
+	log.Debug().Msgf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle())
 	client, err := f.clients.BorrowObject(ctx)
 	if err != nil {
-		f.Logger.Error().Msgf("can't BorrowObject from FTP Connection Pool: %v", err)
+		log.Error().Msgf("can't BorrowObject from FTP Connection Pool: %v", err)
 		return nil, err
 	}
 	return client.(*ftp.ServerConn), nil
 }

 func (f *FTP) returnConnectionToPool(ctx context.Context, where string, client *ftp.ServerConn) {
-	f.Logger.Debug().Msgf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle())
+	log.Debug().Msgf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle())
 	if client != nil {
 		err := f.clients.ReturnObject(ctx, client)
 		if err != nil {
-			f.Logger.Error().Msgf("can't ReturnObject to FTP Connection Pool: %v", err)
+			log.Error().Msgf("can't ReturnObject to FTP Connection Pool: %v", err)
 		}
 	}
 }
@@ -175,7 +174,7 @@ func (f *FTP) Walk(ctx context.Context, ftpPath string, recursive bool, process
 }

 func (f *FTP) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) {
-	f.Logger.Debug().Msgf("GetFileReader key=%s", key)
+	log.Debug().Msgf("GetFileReader key=%s", key)
 	client, err := f.getConnectionFromPool(ctx, "GetFileReader")
 	if err != nil {
 		return nil, err
@@ -194,7 +193,7 @@ func (f *FTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) (io
 }

 func (f *FTP) PutFile(ctx context.Context, key string, r io.ReadCloser) error {
-	f.Logger.Debug().Msgf("PutFile key=%s", key)
+	log.Debug().Msgf("PutFile key=%s", key)
 	client, err := f.getConnectionFromPool(ctx, "PutFile")
 	defer f.returnConnectionToPool(ctx, "PutFile", client)
 	if err != nil {
@@ -239,7 +238,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error {
 		f.dirCacheMutex.RLock()
 		if _, exists := f.dirCache[d]; exists {
 			f.dirCacheMutex.RUnlock()
-			f.Logger.Debug().Msgf("MkdirAll %s exists in dirCache", d)
+			log.Debug().Msgf("MkdirAll %s exists in dirCache", d)
 			continue
 		}
 		f.dirCacheMutex.RUnlock()
@@ -247,7 +246,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error {
 		f.dirCacheMutex.Lock()
 		err = client.MakeDir(d)
 		if err != nil {
-			f.Logger.Warn().Msgf("MkdirAll MakeDir(%s) return error: %v", d, err)
+			log.Warn().Msgf("MkdirAll MakeDir(%s) return error: %v", d, err)
 		} else {
 			f.dirCache[d] = true
 		}
diff --git a/pkg/storage/general.go b/pkg/storage/general.go
index 2899139c..24ecc957 100644
--- a/pkg/storage/general.go
+++ b/pkg/storage/general.go
@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/rs/zerolog"
 	"io"
 	"os"
 	"path"
@@ -49,7 +48,6 @@ type Backup struct {

 type BackupDestination struct {
 	RemoteStorage
-	Logger             zerolog.Logger
 	compressionFormat  string
 	compressionLevel   int
 	disableProgressBar bool
@@ -67,23 +65,23 @@ func (bd *BackupDestination) RemoveOldBackups(ctx context.Context, keep int) err
 		return err
 	}
 	backupsToDelete := GetBackupsToDelete(backupList, keep)
-	bd.Logger.Info().Fields(map[string]interface{}{
+	log.Info().Fields(map[string]interface{}{
 		"operation": "RemoveOldBackups",
 		"duration":  utils.HumanizeDuration(time.Since(start)),
 	}).Msg("calculate backup list for delete")
 	for _, backupToDelete := range backupsToDelete {
 		startDelete := time.Now()
 		if err := bd.RemoveBackup(ctx, backupToDelete); err != nil {
-			bd.Logger.Warn().Msgf("can't delete %s return error : %v", backupToDelete.BackupName, err)
+			log.Warn().Msgf("can't delete %s return error : %v", backupToDelete.BackupName, err)
 		}
-		bd.Logger.Info().Fields(map[string]interface{}{
+		log.Info().Fields(map[string]interface{}{
 			"operation": "RemoveOldBackups",
 			"location":  "remote",
 			"backup":    backupToDelete.BackupName,
 			"duration":  utils.HumanizeDuration(time.Since(startDelete)),
 		}).Msg("done")
 	}
-	bd.Logger.Info().Fields(map[string]interface{}{
+	log.Info().Fields(map[string]interface{}{
 		"operation": "RemoveOldBackups",
 		"duration":  utils.HumanizeDuration(time.Since(start)),
 	}).Msg("done")
@@ -123,17 +121,17 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string]
 	listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind()))
 	listCache := map[string]Backup{}
 	if info, err := os.Stat(listCacheFile); os.IsNotExist(err) || info.IsDir() {
-		bd.Logger.Debug().Msgf("%s not found, load %d elements", listCacheFile, len(listCache))
+		log.Debug().Msgf("%s not found, load %d elements", listCacheFile, len(listCache))
 		return listCache, nil
 	}
 	f, err := os.Open(listCacheFile)
 	if err != nil {
-		bd.Logger.Warn().Msgf("can't open %s return error %v", listCacheFile, err)
+		log.Warn().Msgf("can't open %s return error %v", listCacheFile, err)
 		return listCache, nil
 	}
 	defer func() {
 		if err := f.Close(); err != nil {
-			bd.Logger.Warn().Msgf("can't close %s return error %v", listCacheFile, err)
+			log.Warn().Msgf("can't close %s return error %v", listCacheFile, err)
 		}
 	}()
 	select {
@@ -142,15 +140,15 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string]
 	default:
 		body, err := io.ReadAll(f)
 		if err != nil {
-			bd.Logger.Warn().Msgf("can't read %s return error %v", listCacheFile, err)
+			log.Warn().Msgf("can't read %s return error %v", listCacheFile, err)
 			return listCache, nil
 		}
 		if string(body) != "" {
 			if err := json.Unmarshal(body, &listCache); err != nil {
-				bd.Logger.Fatal().Stack().Msgf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err)
+				log.Fatal().Stack().Msgf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err)
 			}
 		}
-		bd.Logger.Debug().Msgf("%s load %d elements", listCacheFile, len(listCache))
+		log.Debug().Msgf("%s load %d elements", listCacheFile, len(listCache))
 		return listCache, nil
 	}
 }
@@ -159,12 +157,12 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma
 	listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind()))
 	f, err := os.OpenFile(listCacheFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
-		bd.Logger.Warn().Msgf("can't open %s return error %v", listCacheFile, err)
+		log.Warn().Msgf("can't open %s return error %v", listCacheFile, err)
 		return nil
 	}
 	defer func() {
 		if err := f.Close(); err != nil {
-			bd.Logger.Warn().Msgf("can't close %s return error %v", listCacheFile, err)
+			log.Warn().Msgf("can't close %s return error %v", listCacheFile, err)
 		}
 	}()
 	for backupName := range listCache {
@@ -190,15 +188,15 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma
 	default:
 		body, err := json.MarshalIndent(&listCache, "", "\t")
 		if err != nil {
-			bd.Logger.Warn().Msgf("can't json marshal %s return error %v", listCacheFile, err)
+			log.Warn().Msgf("can't json marshal %s return error %v", listCacheFile, err)
 			return nil
 		}
 		_, err = f.Write(body)
 		if err != nil {
-			bd.Logger.Warn().Msgf("can't write to %s return error %v", listCacheFile, err)
+			log.Warn().Msgf("can't write to %s return error %v", listCacheFile, err)
 			return nil
 		}
-		bd.Logger.Debug().Msgf("%s save %d elements", listCacheFile, len(listCache))
+		log.Debug().Msgf("%s save %d elements", listCacheFile, len(listCache))
 		return nil
 	}
 }
@@ -311,7 +309,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool,
 		return nil
 	})
 	if err != nil {
-		bd.Logger.Warn().Msgf("BackupList bd.Walk return error: %v", err)
+		log.Warn().Msgf("BackupList bd.Walk return error: %v", err)
 	}
 	// sort by name for the same not parsed metadata.json
 	sort.SliceStable(result, func(i, j int) bool {
@@ -343,13 +341,13 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot
 	}
 	defer func() {
 		if err := reader.Close(); err != nil {
-			bd.Logger.Warn().Msgf("can't close GetFileReader descriptor %v", reader)
+			log.Warn().Msgf("can't close GetFileReader descriptor %v", reader)
 		}
 		switch reader.(type) {
 		case *os.File:
 			fileName := reader.(*os.File).Name()
 			if err := os.Remove(fileName); err != nil {
-				bd.Logger.Warn().Msgf("can't remove %s", fileName)
+				log.Warn().Msgf("can't remove %s", fileName)
 			}
 		}
 	}()
@@ -361,7 +359,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot
 	proxyReader := bar.NewProxyReader(bufReader)
 	compressionFormat := bd.compressionFormat
 	if !checkArchiveExtension(path.Ext(remotePath), compressionFormat) {
-		bd.Logger.Warn().Msgf("remote file backup extension %s not equal with %s", remotePath, compressionFormat)
+		log.Warn().Msgf("remote file backup extension %s not equal with %s", remotePath, compressionFormat)
 		compressionFormat = strings.Replace(path.Ext(remotePath), ".", "", -1)
 	}
 	z, err := getArchiveReader(compressionFormat)
@@ -402,7 +400,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot
 		if err := f.Close(); err != nil {
 			return err
 		}
-		//bd.Logger.Debug().Msgf("extract %s", extractFile)
+		//log.Debug().Msgf("extract %s", extractFile)
 		return nil
 	}); err != nil {
 		return err
@@ -437,11 +435,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc
 	defer func() {
 		if writerErr != nil {
 			if err := w.CloseWithError(writerErr); err != nil {
-				bd.Logger.Error().Msgf("can't close after error %v pipe writer error: %v", writerErr, err)
+				log.Error().Msgf("can't close after error %v pipe writer error: %v", writerErr, err)
 			}
 		} else {
 			if err := w.Close(); err != nil {
-				bd.Logger.Error().Msgf("can't close pipe writer: %v", err)
+				log.Error().Msgf("can't close pipe writer: %v", err)
 			}
 		}
 	}()
@@ -468,7 +466,7 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc
 			},
 		}
 		archiveFiles = append(archiveFiles, file)
-		//bd.Logger.Debug().Msgf("add %s to archive %s", filePath, remotePath)
+		//log.Debug().Msgf("add %s to archive %s", filePath, remotePath)
 	}
 	if writerErr = z.Archive(ctx, w, archiveFiles); writerErr != nil {
 		return writerErr
@@ -479,11 +477,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc
 	defer func() {
 		if readerErr != nil {
 			if err := body.CloseWithError(readerErr); err != nil {
-				bd.Logger.Error().Msgf("can't close after error %v pipe reader error: %v", writerErr, err)
+				log.Error().Msgf("can't close after error %v pipe reader error: %v", writerErr, err)
 			}
 		} else {
 			if err := body.Close(); err != nil {
-				bd.Logger.Error().Msgf("can't close pipe reader: %v", err)
+				log.Error().Msgf("can't close pipe reader: %v", err)
 			}
 		}
 	}()
@@ -508,10 +506,6 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot
 		bar = progressbar.StartNewByteBar(!bd.disableProgressBar, totalBytes)
 		defer bar.Finish()
 	}
-	logger := bd.Logger.With().Fields(map[string]interface{}{
-		"path":      remotePath,
-		"operation": "download",
-	}).Logger()
 	return bd.Walk(ctx, remotePath, true, func(ctx context.Context, f RemoteFile) error {
 		if bd.Kind() == "SFTP" && (f.Name() == "." || f.Name() == "..") {
 			return nil
@@ -520,30 +514,30 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot
 		err := retry.RunCtx(ctx, func(ctx context.Context) error {
 			r, err := bd.GetFileReader(ctx, path.Join(remotePath, f.Name()))
 			if err != nil {
-				logger.Error().Err(err).Send()
+				log.Error().Err(err).Send()
 				return err
 			}
 			dstFilePath := path.Join(localPath, f.Name())
 			dstDirPath, _ := path.Split(dstFilePath)
 			if err := os.MkdirAll(dstDirPath, 0750); err != nil {
-				logger.Error().Err(err).Send()
+				log.Error().Err(err).Send()
 				return err
 			}
 			dst, err := os.Create(dstFilePath)
 			if err != nil {
-				logger.Error().Err(err).Send()
+				log.Error().Err(err).Send()
 				return err
 			}
 			if _, err := io.CopyBuffer(dst, r, nil); err != nil {
-				logger.Error().Err(err).Send()
+				log.Error().Err(err).Send()
 				return err
 			}
 			if err := dst.Close(); err != nil {
-				logger.Error().Err(err).Send()
+				log.Error().Err(err).Send()
 				return err
 			}
 			if err := r.Close(); err != nil {
-				logger.Error().Err(err).Send()
+				log.Error().Err(err).Send()
 				return err
 			}
 			return nil
@@ -584,7 +578,7 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLoc
 	}
 	closeFile := func() {
 		if err := f.Close(); err != nil {
-			bd.Logger.Warn().Msgf("can't close UploadPath file descriptor %v: %v", f, err)
+			log.Warn().Msgf("can't close UploadPath file descriptor %v: %v", f, err)
 		}
 	}
 	retry := retrier.New(retrier.ConstantBackoff(RetriesOnFailure, RetriesDuration), nil)
@@ -609,7 +603,6 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLoc
 }

 func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, calcMaxSize bool, backupName string) (*BackupDestination, error) {
-	logger := log.With().Str("logger", "NewBackupDestination").Logger()
 	var err error
 	// https://github.com/Altinity/clickhouse-backup/issues/404
 	if calcMaxSize {
@@ -618,7 +611,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 			return nil, err
 		}
 		if cfg.General.MaxFileSize > 0 && cfg.General.MaxFileSize < maxFileSize {
-			logger.Warn().Msgf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize)
+			log.Warn().Msgf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize)
 		}
 		if cfg.General.MaxFileSize <= 0 || cfg.General.MaxFileSize < maxFileSize {
 			cfg.General.MaxFileSize = maxFileSize
@@ -646,7 +639,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		azblobStorage.Config.BufferSize = bufferSize
 		return &BackupDestination{
 			azblobStorage,
-			logger.With().Str("logger", "azure").Logger(),
 			cfg.AzureBlob.CompressionFormat,
 			cfg.AzureBlob.CompressionLevel,
 			cfg.General.DisableProgressBar,
@@ -667,7 +659,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 			Concurrency: cfg.S3.Concurrency,
 			BufferSize:  512 * 1024,
 			PartSize:    partSize,
-			Logger:      log.With().Str("logger", "S3").Logger(),
 		}
 		s3Storage.Config.Path, err = ch.ApplyMacros(ctx, s3Storage.Config.Path)
 		if err != nil {
@@ -684,7 +675,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		}
 		return &BackupDestination{
 			s3Storage,
-			log.With().Str("logger", "s3").Logger(),
 			cfg.S3.CompressionFormat,
 			cfg.S3.CompressionLevel,
 			cfg.General.DisableProgressBar,
@@ -706,7 +696,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		}
 		return &BackupDestination{
 			googleCloudStorage,
-			log.With().Str("logger", "gcs").Logger(),
 			cfg.GCS.CompressionFormat,
 			cfg.GCS.CompressionLevel,
 			cfg.General.DisableProgressBar,
@@ -719,7 +708,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		}
 		return &BackupDestination{
 			tencentStorage,
-			log.With().Str("logger", "cos").Logger(),
 			cfg.COS.CompressionFormat,
 			cfg.COS.CompressionLevel,
 			cfg.General.DisableProgressBar,
@@ -727,7 +715,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 	case "ftp":
 		ftpStorage := &FTP{
 			Config: &cfg.FTP,
-			Logger: log.With().Str("logger", "FTP").Logger(),
 		}
 		ftpStorage.Config.Path, err = ch.ApplyMacros(ctx, ftpStorage.Config.Path)
 		if err != nil {
@@ -735,7 +722,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		}
 		return &BackupDestination{
 			ftpStorage,
-			log.With().Str("logger", "FTP").Logger(),
 			cfg.FTP.CompressionFormat,
 			cfg.FTP.CompressionLevel,
 			cfg.General.DisableProgressBar,
@@ -750,7 +736,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		}
 		return &BackupDestination{
 			sftpStorage,
-			log.With().Str("logger", "SFTP").Logger(),
 			cfg.SFTP.CompressionFormat,
 			cfg.SFTP.CompressionLevel,
 			cfg.General.DisableProgressBar,
diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go
index 31d5faab..65e14921 100644
--- a/pkg/storage/s3.go
+++ b/pkg/storage/s3.go
@@ -4,10 +4,6 @@ import (
 	"context"
 	"crypto/tls"
 	"fmt"
-	"github.com/Altinity/clickhouse-backup/pkg/config"
-	"github.com/aws/smithy-go"
-	awsV2http "github.com/aws/smithy-go/transport/http"
-	"github.com/rs/zerolog"
 	"io"
 	"net/http"
 	"os"
@@ -15,8 +11,7 @@ import (
 	"strings"
 	"time"

-	"golang.org/x/sync/errgroup"
-
+	"github.com/Altinity/clickhouse-backup/pkg/config"
 	"github.com/aws/aws-sdk-go-v2/aws"
 	awsV2Config "github.com/aws/aws-sdk-go-v2/config"
 	"github.com/aws/aws-sdk-go-v2/credentials"
@@ -25,17 +20,22 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/smithy-go"
 	awsV2Logging "github.com/aws/smithy-go/logging"
+	awsV2http "github.com/aws/smithy-go/transport/http"
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	"golang.org/x/sync/errgroup"
 )

 type S3LogToZeroLogAdapter struct {
 	logger zerolog.Logger
 }

-func newS3Logger(log zerolog.Logger) S3LogToZeroLogAdapter {
+func newS3Logger(logger zerolog.Logger) S3LogToZeroLogAdapter {
 	return S3LogToZeroLogAdapter{
-		logger: log,
+		logger: logger,
 	}
 }

@@ -54,7 +54,6 @@ type S3 struct {
 	uploader    *s3manager.Uploader
 	downloader  *s3manager.Downloader
 	Config      *config.S3Config
-	Logger      zerolog.Logger
 	PartSize    int64
 	Concurrency int
 	BufferSize  int
@@ -108,7 +107,7 @@ func (s *S3) Connect(ctx context.Context) error {
 	}

 	if s.Config.Debug {
-		awsConfig.Logger = newS3Logger(s.Logger)
+		awsConfig.Logger = newS3Logger(log.Logger)
 		awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponse
 	}
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 63de1a68..fb805a2d 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -1912,7 +1912,6 @@ func (ch *TestClickHouse) connect(timeOut string) error {
 			Port:    9000,
 			Timeout: timeOut,
 		},
-		Logger: log.With().Str("logger", "clickhouse").Logger(),
 	}
 	var err error
 	for i := 0; i < 3; i++ {