diff --git a/cmd/internal/backup/backup.go b/cmd/internal/backup/backup.go index 5c33061..682ba7b 100644 --- a/cmd/internal/backup/backup.go +++ b/cmd/internal/backup/backup.go @@ -118,7 +118,13 @@ func (b *Backuper) CreateBackup(ctx context.Context) error { b.log.Info("encrypted backup") } - err = b.bp.UploadBackup(ctx, filename) + file, err := os.Open(filename) + if err != nil { + b.metrics.CountError("open") + return fmt.Errorf("error opening backup file: %w", err) + } + + err = b.bp.UploadBackup(ctx, file) if err != nil { b.metrics.CountError("upload") return fmt.Errorf("error uploading backup: %w", err) diff --git a/cmd/internal/backup/providers/contract.go b/cmd/internal/backup/providers/contract.go index 4356dbd..3c57987 100644 --- a/cmd/internal/backup/providers/contract.go +++ b/cmd/internal/backup/providers/contract.go @@ -2,6 +2,7 @@ package providers import ( "context" + "io" "time" ) @@ -10,8 +11,8 @@ type BackupProvider interface { ListBackups(ctx context.Context) (BackupVersions, error) CleanupBackups(ctx context.Context) error GetNextBackupName(ctx context.Context) string - DownloadBackup(ctx context.Context, version *BackupVersion, outDir string) (string, error) - UploadBackup(ctx context.Context, sourcePath string) error + DownloadBackup(ctx context.Context, version *BackupVersion, writer io.Writer) error + UploadBackup(ctx context.Context, reader io.Reader) error } type BackupVersions interface { diff --git a/cmd/internal/backup/providers/gcp/gcp.go b/cmd/internal/backup/providers/gcp/gcp.go index cd3e51b..38e1469 100644 --- a/cmd/internal/backup/providers/gcp/gcp.go +++ b/cmd/internal/backup/providers/gcp/gcp.go @@ -6,13 +6,13 @@ import ( "io" "log/slog" "net/http" - "path/filepath" "strconv" - "strings" "errors" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/encryption" 
"github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/spf13/afero" @@ -24,15 +24,17 @@ import ( ) const ( - defaultBackupName = "db" + ProviderConstant = "db" ) // BackupProviderGCP implements the backup provider interface for GCP type BackupProviderGCP struct { - fs afero.Fs - log *slog.Logger - c *storage.Client - config *BackupProviderConfigGCP + fs afero.Fs + log *slog.Logger + c *storage.Client + config *BackupProviderConfigGCP + encrypter *encryption.Encrypter + compressor *compress.Compressor } // BackupProviderConfigGCP provides configuration for the BackupProviderGCP @@ -45,6 +47,8 @@ type BackupProviderConfigGCP struct { ProjectID string FS afero.Fs ClientOpts []option.ClientOption + Encrypter *encryption.Encrypter + Compressor *compress.Compressor } func (c *BackupProviderConfigGCP) validate() error { @@ -72,9 +76,6 @@ func New(ctx context.Context, log *slog.Logger, config *BackupProviderConfigGCP) if config.ObjectsToKeep == 0 { config.ObjectsToKeep = constants.DefaultObjectsToKeep } - if config.BackupName == "" { - config.BackupName = defaultBackupName - } if config.FS == nil { config.FS = afero.NewOsFs() } @@ -90,10 +91,12 @@ func New(ctx context.Context, log *slog.Logger, config *BackupProviderConfigGCP) } return &BackupProviderGCP{ - c: client, - config: config, - log: log, - fs: config.FS, + c: client, + config: config, + log: log, + fs: config.FS, + compressor: config.Compressor, + encrypter: config.Encrypter, }, nil } @@ -147,63 +150,51 @@ func (b *BackupProviderGCP) CleanupBackups(_ context.Context) error { } // DownloadBackup downloads the given backup version to the specified folder -func (b *BackupProviderGCP) DownloadBackup(ctx context.Context, version *providers.BackupVersion, outDir string) (string, error) { +func (b *BackupProviderGCP) DownloadBackup(ctx context.Context, version *providers.BackupVersion, writer io.Writer) error { gen, err := strconv.ParseInt(version.Version, 10, 64) if err != nil { - return "", err + 
return err } bucket := b.c.Bucket(b.config.BucketName) - downloadFileName := version.Name - if strings.Contains(downloadFileName, "/") { - downloadFileName = filepath.Base(downloadFileName) - } - - backupFilePath := filepath.Join(outDir, downloadFileName) - - b.log.Info("downloading", "object", version.Name, "gen", gen, "to", backupFilePath) + b.log.Info("downloading", "object", version.Name, "gen", gen) r, err := bucket.Object(version.Name).Generation(gen).NewReader(ctx) if err != nil { - return "", fmt.Errorf("backup not found: %w", err) + return fmt.Errorf("backup not found: %w", err) } defer r.Close() - f, err := b.fs.Create(backupFilePath) + _, err = io.Copy(writer, r) if err != nil { - return "", err + return fmt.Errorf("error writing file from gcp to filesystem: %w", err) } - defer f.Close() - _, err = io.Copy(f, r) - if err != nil { - return "", fmt.Errorf("error writing file from gcp to filesystem: %w", err) - } - - return backupFilePath, nil + return nil } // UploadBackup uploads a backup to the backup provider -func (b *BackupProviderGCP) UploadBackup(ctx context.Context, sourcePath string) error { +func (b *BackupProviderGCP) UploadBackup(ctx context.Context, reader io.Reader) error { bucket := b.c.Bucket(b.config.BucketName) - r, err := b.fs.Open(sourcePath) - if err != nil { - return err + destination := ProviderConstant + if b.compressor != nil { + destination += b.compressor.Extension() + } + if b.encrypter != nil { + destination += b.encrypter.Extension() } - defer r.Close() - destination := filepath.Base(sourcePath) if b.config.ObjectPrefix != "" { destination = b.config.ObjectPrefix + "/" + destination } - b.log.Debug("uploading object", "src", sourcePath, "dest", destination) + b.log.Debug("uploading object", "dest", destination) obj := bucket.Object(destination) w := obj.NewWriter(ctx) - if _, err := io.Copy(w, r); err != nil { + if _, err := io.Copy(w, reader); err != nil { return err } defer w.Close() diff --git 
a/cmd/internal/backup/providers/gcp/gcp_integration_test.go b/cmd/internal/backup/providers/gcp/gcp_integration_test.go index caf22dc..88d3128 100644 --- a/cmd/internal/backup/providers/gcp/gcp_integration_test.go +++ b/cmd/internal/backup/providers/gcp/gcp_integration_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/docker/docker/api/types/container" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/spf13/afero" "github.com/stretchr/testify/assert" @@ -53,7 +54,7 @@ func Test_BackupProviderGCP(t *testing.T) { var ( endpoint = conn.Endpoint + "/storage/v1/" backupAmount = 5 - expectedBackupName = defaultBackupName + ".tar.gz" + expectedBackupName = "db.tar.gz" prefix = fmt.Sprintf("test-with-%d", backupAmount) fs = afero.NewMemMapFs() @@ -64,6 +65,9 @@ func Test_BackupProviderGCP(t *testing.T) { httpClient = &http.Client{Transport: transCfg} ) + compressor, err := compress.New("targz") + require.NoError(t, err) + p, err := New(ctx, log, &BackupProviderConfigGCP{ BucketName: "test", BucketLocation: "europe-west3", @@ -71,7 +75,9 @@ func Test_BackupProviderGCP(t *testing.T) { ProjectID: "test-project-id", FS: fs, ClientOpts: []option.ClientOption{option.WithEndpoint(endpoint), option.WithHTTPClient(httpClient)}, + Compressor: compressor, }) + require.NoError(t, err) require.NotNil(t, p) @@ -95,7 +101,9 @@ func Test_BackupProviderGCP(t *testing.T) { err = afero.WriteFile(fs, backupPath, []byte(backupContent), 0600) require.NoError(t, err) - err = p.UploadBackup(ctx, backupPath) + backupFile, err := fs.Open(backupPath) + require.NoError(t, err) + err = p.UploadBackup(ctx, backupFile) require.NoError(t, err) // cleaning up after test @@ -153,17 +161,20 @@ func Test_BackupProviderGCP(t *testing.T) { latestVersion := versions.Latest() require.NotNil(t, latestVersion) - backupFilePath, err := p.DownloadBackup(ctx, latestVersion, "") + outputFile, err := 
fs.Create("outputfile") + require.NoError(t, err) + + err = p.DownloadBackup(ctx, latestVersion, outputFile) require.NoError(t, err) - gotContent, err := afero.ReadFile(fs, backupFilePath) + gotContent, err := afero.ReadFile(fs, outputFile.Name()) require.NoError(t, err) backupContent := fmt.Sprintf("precious data %d", backupAmount-1) require.Equal(t, backupContent, string(gotContent)) // cleaning up after test - err = fs.Remove(backupFilePath) + err = fs.Remove(outputFile.Name()) require.NoError(t, err) }) diff --git a/cmd/internal/backup/providers/local/local.go b/cmd/internal/backup/providers/local/local.go index 8174003..6311957 100644 --- a/cmd/internal/backup/providers/local/local.go +++ b/cmd/internal/backup/providers/local/local.go @@ -3,6 +3,7 @@ package local import ( "context" "fmt" + "io" "log/slog" "os" "path/filepath" @@ -11,13 +12,15 @@ import ( "errors" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" - "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/encryption" "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/spf13/afero" ) const ( defaultLocalBackupPath = constants.SidecarBaseDir + "/local-provider" + ProviderConstant = "db" ) // BackupProviderLocal implements the backup provider interface for no backup provider (useful to disable sidecar functionality in development environments) @@ -26,6 +29,8 @@ type BackupProviderLocal struct { log *slog.Logger config *BackupProviderConfigLocal nextBackupCount int64 + encrypter *encryption.Encrypter + compressor *compress.Compressor } // BackupProviderConfigLocal provides configuration for the BackupProviderLocal @@ -33,6 +38,8 @@ type BackupProviderConfigLocal struct { LocalBackupPath string ObjectsToKeep int64 FS afero.Fs + Encrypter *encryption.Encrypter + Compressor *compress.Compressor } func (c 
*BackupProviderConfigLocal) validate() error { @@ -61,9 +68,11 @@ func New(log *slog.Logger, config *BackupProviderConfigLocal) (*BackupProviderLo } return &BackupProviderLocal{ - config: config, - log: log, - fs: config.FS, + config: config, + log: log, + fs: config.FS, + encrypter: config.Encrypter, + compressor: config.Compressor, }, nil } @@ -86,28 +95,44 @@ func (b *BackupProviderLocal) CleanupBackups(_ context.Context) error { } // DownloadBackup downloads the given backup version to the specified folder -func (b *BackupProviderLocal) DownloadBackup(_ context.Context, version *providers.BackupVersion, outDir string) (string, error) { +func (b *BackupProviderLocal) DownloadBackup(_ context.Context, version *providers.BackupVersion, writer io.Writer) error { b.log.Info("download backup called for provider local") source := filepath.Join(b.config.LocalBackupPath, version.Name) - backupFilePath := filepath.Join(outDir, version.Name) + infile, err := b.fs.Open(source) + if err != nil { + return fmt.Errorf("could not open file %s: %w", source, err) + } + defer infile.Close() - err := utils.Copy(b.fs, source, backupFilePath) + _, err = io.Copy(writer, infile) if err != nil { - return "", err + return err } - return backupFilePath, err + return err } -// UploadBackup uploads a backup to the backup provider -func (b *BackupProviderLocal) UploadBackup(_ context.Context, sourcePath string) error { +// UploadBackup uploads a backup to the backup provider by providing a reader to the backup archive +func (b *BackupProviderLocal) UploadBackup(ctx context.Context, reader io.Reader) error { b.log.Info("upload backups called for provider local") - destination := filepath.Join(b.config.LocalBackupPath, filepath.Base(sourcePath)) + destination := b.config.LocalBackupPath + "/" + b.GetNextBackupName(ctx) + b.nextBackupCount++ + b.nextBackupCount = b.nextBackupCount % b.config.ObjectsToKeep + if b.compressor != nil { + destination = destination + b.compressor.Extension() + } + if 
b.encrypter != nil { + destination = destination + b.encrypter.Extension() + } + output, err := b.fs.Create(destination) + if err != nil { + return fmt.Errorf("could not create file %s: %w", destination, err) + } - err := utils.Copy(b.fs, sourcePath, destination) + _, err = io.Copy(output, reader) if err != nil { return err } @@ -118,8 +143,6 @@ func (b *BackupProviderLocal) UploadBackup(_ context.Context, sourcePath string) // GetNextBackupName returns a name for the next backup archive that is going to be uploaded func (b *BackupProviderLocal) GetNextBackupName(_ context.Context) string { name := strconv.FormatInt(b.nextBackupCount, 10) - b.nextBackupCount++ - b.nextBackupCount = b.nextBackupCount % b.config.ObjectsToKeep return name } diff --git a/cmd/internal/backup/providers/local/local_test.go b/cmd/internal/backup/providers/local/local_test.go index 05286d6..5c1af3e 100644 --- a/cmd/internal/backup/providers/local/local_test.go +++ b/cmd/internal/backup/providers/local/local_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/spf13/afero" "github.com/stretchr/testify/assert" @@ -26,8 +27,11 @@ func Test_BackupProviderLocal(t *testing.T) { t.Run(fmt.Sprintf("testing with %d backups", backupAmount), func(t *testing.T) { fs := afero.NewMemMapFs() + compressor, err := compress.New("targz") + require.NoError(t, err) p, err := New(log, &BackupProviderConfigLocal{ - FS: fs, + FS: fs, + Compressor: compressor, }) require.NoError(t, err) require.NotNil(t, p) @@ -47,18 +51,21 @@ func Test_BackupProviderLocal(t *testing.T) { t.Run("verify upload", func(t *testing.T) { for i := range backupAmount { - backupName := p.GetNextBackupName(ctx) + ".tar.gz" + backupName := p.GetNextBackupName(ctx) + compressor.Extension() backupPath := path.Join(constants.UploadDir, backupName) backupContent := fmt.Sprintf("precious data %d", i+1) 
err = afero.WriteFile(fs, backupPath, []byte(backupContent), 0600) require.NoError(t, err) - err = p.UploadBackup(ctx, backupPath) + infile, err := fs.Open(backupPath) + require.NoError(t, err) + + err = p.UploadBackup(ctx, infile) require.NoError(t, err) localPath := path.Join(localProviderBackupPath, backupName) - _, err := fs.Stat(localPath) + _, err = fs.Stat(localPath) require.NoError(t, err) backupFiles, err := afero.ReadDir(fs, localProviderBackupPath) @@ -130,16 +137,19 @@ func Test_BackupProviderLocal(t *testing.T) { latestVersion := versions.Latest() require.NotNil(t, latestVersion) - backupFilePath, err := p.DownloadBackup(ctx, latestVersion, "") + outputFile, err := fs.Create("output.tar.gz") + require.NoError(t, err) + + err = p.DownloadBackup(ctx, latestVersion, outputFile) require.NoError(t, err) - gotContent, err := afero.ReadFile(fs, backupFilePath) + gotContent, err := afero.ReadFile(fs, outputFile.Name()) require.NoError(t, err) require.Equal(t, fmt.Sprintf("precious data %d", backupAmount), string(gotContent)) // cleaning up after test - err = fs.Remove(backupFilePath) + err = fs.Remove(outputFile.Name()) require.NoError(t, err) }) diff --git a/cmd/internal/backup/providers/s3/s3.go b/cmd/internal/backup/providers/s3/s3.go index 7634a95..3e2c646 100644 --- a/cmd/internal/backup/providers/s3/s3.go +++ b/cmd/internal/backup/providers/s3/s3.go @@ -2,13 +2,16 @@ package s3 import ( "context" + "io" "log/slog" - "path/filepath" - "strings" + "strconv" "errors" "github.com/metal-stack/backup-restore-sidecar/cmd/internal/backup/providers" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/encryption" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/utils" "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/spf13/afero" @@ -21,16 +24,18 @@ import ( ) const ( - defaultBackupName = "db" + ProviderConstant = "db" ) // BackupProviderS3 
implements the backup provider interface for S3 type BackupProviderS3 struct { - fs afero.Fs - log *slog.Logger - c *s3.S3 - sess *session.Session - config *BackupProviderConfigS3 + fs afero.Fs + log *slog.Logger + c *s3.S3 + sess *session.Session + config *BackupProviderConfigS3 + encrypter *encryption.Encrypter + compressor *compress.Compressor } // BackupProviderConfigS3 provides configuration for the BackupProviderS3 @@ -44,6 +49,8 @@ type BackupProviderConfigS3 struct { ObjectPrefix string ObjectsToKeep int64 FS afero.Fs + Encrypter *encryption.Encrypter + Compressor *compress.Compressor } func (c *BackupProviderConfigS3) validate() error { @@ -72,9 +79,6 @@ func New(log *slog.Logger, config *BackupProviderConfigS3) (*BackupProviderS3, e if config.ObjectsToKeep == 0 { config.ObjectsToKeep = constants.DefaultObjectsToKeep } - if config.BackupName == "" { - config.BackupName = defaultBackupName - } if config.FS == nil { config.FS = afero.NewOsFs() } @@ -96,11 +100,13 @@ func New(log *slog.Logger, config *BackupProviderConfigS3) (*BackupProviderS3, e client := s3.New(newSession) return &BackupProviderS3{ - c: client, - sess: newSession, - config: config, - log: log, - fs: config.FS, + c: client, + sess: newSession, + config: config, + log: log, + fs: config.FS, + encrypter: config.Encrypter, + compressor: config.Compressor, }, nil } @@ -189,61 +195,59 @@ func (b *BackupProviderS3) CleanupBackups(_ context.Context) error { } // DownloadBackup downloads the given backup version to the specified folder -func (b *BackupProviderS3) DownloadBackup(ctx context.Context, version *providers.BackupVersion, outDir string) (string, error) { - bucket := aws.String(b.config.BucketName) - - downloadFileName := version.Name - if strings.Contains(downloadFileName, "/") { - downloadFileName = filepath.Base(downloadFileName) - } - - backupFilePath := filepath.Join(outDir, downloadFileName) - - f, err := b.fs.Create(backupFilePath) +func (b *BackupProviderS3) DownloadBackup(ctx 
context.Context, version *providers.BackupVersion, writer io.Writer) error { + gen, err := strconv.ParseInt(version.Version, 10, 64) if err != nil { - return "", err + return err } - defer f.Close() + + bucket := aws.String(b.config.BucketName) downloader := s3manager.NewDownloader(b.sess) + // we need to download the backup sequentially since we fake the download with a io.Writer instead of io.WriterAt + downloader.Concurrency = 1 + + b.log.Info("downloading", "object", version.Name, "get", gen) + streamWriter := utils.NewSequentialWriterAt(writer) _, err = downloader.DownloadWithContext( ctx, - f, + streamWriter, &s3.GetObjectInput{ Bucket: bucket, Key: &version.Name, VersionId: &version.Version, }) if err != nil { - return "", err + return err } - return backupFilePath, nil + return nil } // UploadBackup uploads a backup to the backup provider -func (b *BackupProviderS3) UploadBackup(ctx context.Context, sourcePath string) error { +func (b *BackupProviderS3) UploadBackup(ctx context.Context, reader io.Reader) error { bucket := aws.String(b.config.BucketName) - r, err := b.fs.Open(sourcePath) - if err != nil { - return err + destination := ProviderConstant + if b.compressor != nil { + destination += b.compressor.Extension() + } + if b.encrypter != nil { + destination += b.encrypter.Extension() } - defer r.Close() - destination := filepath.Base(sourcePath) if b.config.ObjectPrefix != "" { destination = b.config.ObjectPrefix + "/" + destination } - b.log.Debug("uploading object", "src", sourcePath, "dest", destination) + b.log.Debug("uploading object", "dest", destination) uploader := s3manager.NewUploader(b.sess) - _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ + _, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: bucket, Key: aws.String(destination), - Body: r, + Body: reader, }) if err != nil { return err diff --git a/cmd/internal/backup/providers/s3/s3_integration_test.go 
b/cmd/internal/backup/providers/s3/s3_integration_test.go index 20c3e03..5a1a67b 100644 --- a/cmd/internal/backup/providers/s3/s3_integration_test.go +++ b/cmd/internal/backup/providers/s3/s3_integration_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/metal-stack/backup-restore-sidecar/cmd/internal/compress" "github.com/metal-stack/backup-restore-sidecar/pkg/constants" "github.com/spf13/afero" "github.com/stretchr/testify/assert" @@ -49,12 +50,15 @@ func Test_BackupProviderS3(t *testing.T) { var ( endpoint = conn.Endpoint backupAmount = 5 - expectedBackupName = defaultBackupName + ".tar.gz" + expectedBackupName = "db.tar.gz" prefix = fmt.Sprintf("test-with-%d", backupAmount) fs = afero.NewMemMapFs() ) + compressor, err := compress.New("targz") + require.NoError(t, err) + p, err := New(log, &BackupProviderConfigS3{ BucketName: "test", Endpoint: endpoint, @@ -63,6 +67,7 @@ func Test_BackupProviderS3(t *testing.T) { SecretKey: "SECRETKEY", ObjectPrefix: prefix, FS: fs, + Compressor: compressor, }) require.NoError(t, err) require.NotNil(t, p) @@ -87,7 +92,9 @@ func Test_BackupProviderS3(t *testing.T) { err = afero.WriteFile(fs, backupPath, []byte(backupContent), 0600) require.NoError(t, err) - err = p.UploadBackup(ctx, backupPath) + backupFile, err := fs.Open(backupPath) + require.NoError(t, err) + err = p.UploadBackup(ctx, backupFile) require.NoError(t, err) // cleaning up after test @@ -147,17 +154,19 @@ func Test_BackupProviderS3(t *testing.T) { latestVersion := versions.Latest() require.NotNil(t, latestVersion) - backupFilePath, err := p.DownloadBackup(ctx, latestVersion, "") + outputFile, err := fs.Create("outputfile") + require.NoError(t, err) + err = p.DownloadBackup(ctx, latestVersion, outputFile) require.NoError(t, err) - gotContent, err := afero.ReadFile(fs, backupFilePath) + gotContent, err := afero.ReadFile(fs, outputFile.Name()) require.NoError(t, err) backupContent := fmt.Sprintf("precious data %d", backupAmount-1) require.Equal(t, 
backupContent, string(gotContent)) // cleaning up after test - err = fs.Remove(backupFilePath) + err = fs.Remove(outputFile.Name()) require.NoError(t, err) }) diff --git a/cmd/internal/compress/compress.go b/cmd/internal/compress/compress.go index b7386d7..e164490 100644 --- a/cmd/internal/compress/compress.go +++ b/cmd/internal/compress/compress.go @@ -42,7 +42,7 @@ func (c *Compressor) Decompress(backupFilePath string) error { return archiver.Unarchive(backupFilePath, filepath.Dir(constants.RestoreDir)) } -// Extension returns the file extension of the configured compressor, depending on the method +// Extension returns the file extension of the configured compressor, depending on the method func (c *Compressor) Extension() string { return c.extension } diff --git a/cmd/internal/encryption/encryption.go b/cmd/internal/encryption/encryption.go index 65b0c85..9fa2017 100644 --- a/cmd/internal/encryption/encryption.go +++ b/cmd/internal/encryption/encryption.go @@ -236,3 +236,7 @@ func (e *Encrypter) decryptFile(infile, outfile afero.File, block cipher.Block, return nil } + +func (e *Encrypter) Extension() string { + return suffix +} diff --git a/cmd/internal/initializer/initializer.go b/cmd/internal/initializer/initializer.go index 2f11fbb..f7f7363 100644 --- a/cmd/internal/initializer/initializer.go +++ b/cmd/internal/initializer/initializer.go @@ -212,7 +212,15 @@ func (i *Initializer) Restore(ctx context.Context, version *providers.BackupVers return fmt.Errorf("could not delete priorly downloaded file: %w", err) } - backupFilePath, err := i.bp.DownloadBackup(ctx, version, constants.DownloadDir) + outputFile, err := os.Create(backupFilePath) + if err != nil { + return fmt.Errorf("could not open file for writing: %w", err) + } + defer outputFile.Close() + + i.log.Info("downloading backup", "version", version.Version, "path", backupFilePath) + + err = i.bp.DownloadBackup(ctx, version, outputFile) if err != nil { return fmt.Errorf("unable to download backup: %w",
err) } diff --git a/cmd/internal/utils/sequentialWriterAt.go b/cmd/internal/utils/sequentialWriterAt.go new file mode 100644 index 0000000..992bf71 --- /dev/null +++ b/cmd/internal/utils/sequentialWriterAt.go @@ -0,0 +1,15 @@ +package utils + +import "io" + +type SequentialWriterAt struct { + w io.Writer +} + +func NewSequentialWriterAt(w io.Writer) *SequentialWriterAt { + return &SequentialWriterAt{w: w} +} + +func (s *SequentialWriterAt) WriteAt(p []byte, off int64) (n int, err error) { + return s.w.Write(p) +} diff --git a/cmd/main.go b/cmd/main.go index 74f9414..06b5fe3 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -7,6 +7,7 @@ import ( "log/slog" "os" "os/signal" + "path/filepath" "strings" v1 "github.com/metal-stack/backup-restore-sidecar/api/v1" @@ -101,12 +102,13 @@ const ( ) var ( - cfgFile string - logger *slog.Logger - db database.Database - bp providers.BackupProvider - encrypter *encryption.Encrypter - stop context.Context + cfgFile string + logger *slog.Logger + db database.Database + bp providers.BackupProvider + encrypter *encryption.Encrypter + compressor *compress.Compressor + stop context.Context ) var rootCmd = &cobra.Command{ @@ -133,6 +135,9 @@ var startCmd = &cobra.Command{ if err := initEncrypter(); err != nil { return err } + if err := initCompressor(); err != nil { + return err + } return initBackupProvider() }, RunE: func(cmd *cobra.Command, args []string) error { @@ -151,11 +156,6 @@ var startCmd = &cobra.Command{ logger.Info("starting backup-restore-sidecar", "version", v.V, "bind-addr", addr) - comp, err := compress.New(viper.GetString(compressionMethod)) - if err != nil { - return err - } - metrics := metrics.New() metrics.Start(logger.WithGroup("metrics")) @@ -165,11 +165,11 @@ var startCmd = &cobra.Command{ DatabaseProber: db, BackupProvider: bp, Metrics: metrics, - Compressor: comp, + Compressor: compressor, Encrypter: encrypter, }) - if err := initializer.New(logger.WithGroup("initializer"), addr, db, bp, comp, metrics, 
viper.GetString(databaseDatadirFlg), encrypter).Start(stop, backuper); err != nil { + if err := initializer.New(logger.WithGroup("initializer"), addr, db, bp, compressor, metrics, viper.GetString(databaseDatadirFlg), encrypter).Start(stop, backuper); err != nil { return err } @@ -300,15 +300,21 @@ var downloadBackupCmd = &cobra.Command{ output := viper.GetString(downloadOutputFlg) - destination, err := bp.DownloadBackup(context.Background(), &providers.BackupVersion{Name: backup.GetBackup().GetName()}, output) + outputPath := filepath.Join(output, backup.GetBackup().GetName()) + outputFile, err := os.Create(outputPath) + if err != nil { + return fmt.Errorf("failed creating output file: %w", err) + } + + err = bp.DownloadBackup(context.Background(), &providers.BackupVersion{Name: backup.GetBackup().GetName()}, outputFile) if err != nil { return fmt.Errorf("failed downloading backup: %w", err) } if encrypter != nil { - if encryption.IsEncrypted(destination) { - _, err = encrypter.Decrypt(destination) + if encryption.IsEncrypted(outputPath) { + _, err = encrypter.Decrypt(outputPath) if err != nil { return fmt.Errorf("unable to decrypt backup: %w", err) } @@ -556,6 +562,19 @@ func initEncrypter() error { return nil } +func initCompressor() error { + var err error + key := viper.GetString(compressionMethod) + if key != "" { + compressor, err = compress.New(key) + if err != nil { + return fmt.Errorf("unable to initialize compressor: %w", err) + } + logger.Info("initialized compressor") + } + return nil +} + func initBackupProvider() error { bpString := viper.GetString(backupProviderFlg) var err error @@ -591,6 +610,8 @@ func initBackupProvider() error { &local.BackupProviderConfigLocal{ LocalBackupPath: viper.GetString(localBackupPathFlg), ObjectsToKeep: viper.GetInt64(objectsToKeepFlg), + Encrypter: encrypter, + Compressor: compressor, }, ) default: