Commit
Merge pull request #4624 from MichaelEischer/better-restorer-error-reporting

Improve restorer error reporting
MichaelEischer committed Jan 7, 2024
2 parents f8b4e93 + 1008723 commit e4a7eb0
Showing 5 changed files with 119 additions and 15 deletions.
11 changes: 11 additions & 0 deletions changelog/unreleased/pull-4624
@@ -0,0 +1,11 @@
Bugfix: Correct restore progress information if an error occurs

If an error occurred while restoring a snapshot, this could cause the restore
progress bar to show incorrect information. In addition, if a data file could
not be loaded completely, then errors would also be reported for some already
restored files.

We have improved the error reporting of the restore command to be more accurate.

https://github.com/restic/restic/pull/4624
https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943
12 changes: 8 additions & 4 deletions internal/repository/repository.go
@@ -881,9 +881,9 @@ type BackendLoadFn
 const maxUnusedRange = 4 * 1024 * 1024
 
 // StreamPack loads the listed blobs from the specified pack file. The plaintext blob is passed to
-// the handleBlobFn callback or an error if decryption failed or the blob hash does not match. In
-// case of download errors handleBlobFn might be called multiple times for the same blob. If the
-// callback returns an error, then StreamPack will abort and not retry it.
+// the handleBlobFn callback or an error if decryption failed or the blob hash does not match.
+// handleBlobFn is never called multiple times for the same blob. If the callback returns an error,
+// then StreamPack will abort and not retry it.
 func StreamPack(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
     if len(blobs) == 0 {
         // nothing to do
@@ -945,7 +945,9 @@ func streamPackPart
     currentBlobEnd := dataStart
     var buf []byte
     var decode []byte
-    for _, entry := range blobs {
+    for len(blobs) > 0 {
+        entry := blobs[0]
+
         skipBytes := int(entry.Offset - currentBlobEnd)
         if skipBytes < 0 {
             return errors.Errorf("overlapping blobs in pack %v", packID)
@@ -1008,6 +1010,8 @@ func streamPackPart
             cancel()
             return backoff.Permanent(err)
         }
+        // ensure that each blob is only passed once to handleBlobFn
+        blobs = blobs[1:]
     }
     return nil
 })
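A minimal sketch (not part of this commit) of a StreamPack caller under the new guarantee. Only the StreamPack signature comes from the code above; ctx, beLoad, key, packID, blobs, and the process helper are assumed and purely illustrative.

    seen := make(map[restic.BlobHandle]bool)
    err := repository.StreamPack(ctx, beLoad, key, packID, blobs,
        func(h restic.BlobHandle, buf []byte, err error) error {
            if seen[h] {
                // with this change this can no longer happen: each blob
                // reaches the callback at most once, even when the pack
                // file download had to be retried
                return fmt.Errorf("blob %v delivered twice", h)
            }
            seen[h] = true
            if err != nil {
                // decryption failure or hash mismatch for this blob;
                // returning the error makes StreamPack abort without retry
                return err
            }
            return process(h, buf) // hypothetical consumer
        })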
50 changes: 40 additions & 10 deletions internal/repository/repository_test.go
@@ -5,6 +5,7 @@ import (
     "context"
     "crypto/sha256"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "math/rand"
@@ -14,6 +15,7 @@ import (
     "testing"
     "time"
 
+    "github.com/cenkalti/backoff/v4"
     "github.com/google/go-cmp/cmp"
     "github.com/klauspost/compress/zstd"
     "github.com/restic/restic/internal/backend"
@@ -529,7 +531,9 @@ func testStreamPack(t *testing.T, version uint) {
     packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)
 
     loadCalls := 0
-    load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+    shortFirstLoad := false
+
+    loadBytes := func(length int, offset int64) []byte {
         data := packfile
 
         if offset > int64(len(data)) {
@@ -541,32 +545,56 @@ func testStreamPack(t *testing.T, version uint) {
         if length > len(data) {
             length = len(data)
         }
+        if shortFirstLoad {
+            length /= 2
+            shortFirstLoad = false
+        }
 
-        data = data[:length]
+        return data[:length]
+    }
+
+    load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+        data := loadBytes(length, offset)
+        if shortFirstLoad {
+            data = data[:len(data)/2]
+            shortFirstLoad = false
+        }
 
         loadCalls++
 
-        return fn(bytes.NewReader(data))
+        err := fn(bytes.NewReader(data))
+        if err == nil {
+            return nil
+        }
+        var permanent *backoff.PermanentError
+        if errors.As(err, &permanent) {
+            return err
+        }
+
+        // retry loading once
+        return fn(bytes.NewReader(loadBytes(length, offset)))
     }
 
     // first, test regular usage
     t.Run("regular", func(t *testing.T) {
         tests := []struct {
-            blobs []restic.Blob
-            calls int
+            blobs          []restic.Blob
+            calls          int
+            shortFirstLoad bool
         }{
-            {packfileBlobs[1:2], 1},
-            {packfileBlobs[2:5], 1},
-            {packfileBlobs[2:8], 1},
+            {packfileBlobs[1:2], 1, false},
+            {packfileBlobs[2:5], 1, false},
+            {packfileBlobs[2:8], 1, false},
             {[]restic.Blob{
                 packfileBlobs[0],
                 packfileBlobs[4],
                 packfileBlobs[2],
-            }, 1},
+            }, 1, false},
             {[]restic.Blob{
                 packfileBlobs[0],
                 packfileBlobs[len(packfileBlobs)-1],
-            }, 2},
+            }, 2, false},
+            {packfileBlobs[:], 1, true},
         }
 
         for _, test := range tests {
@@ -593,6 +621,7 @@ func testStreamPack(t *testing.T, version uint) {
             }
 
             loadCalls = 0
+            shortFirstLoad = test.shortFirstLoad
             err = repository.StreamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
             if err != nil {
                 t.Fatal(err)
@@ -605,6 +634,7 @@ func testStreamPack(t *testing.T, version uint) {
             })
         }
     })
+    shortFirstLoad = false
 
     // next, test invalid uses, which should return an error
     t.Run("invalid", func(t *testing.T) {
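The retry logic added to the test loader follows the backoff package's permanent-error convention: errors wrapped with backoff.Permanent must not be retried. A self-contained sketch of that convention, separate from the diff (the failing operation is invented; the github.com/cenkalti/backoff/v4 calls are real):

    package main

    import (
        "errors"
        "fmt"

        "github.com/cenkalti/backoff/v4"
    )

    func main() {
        attempts := 0
        op := func() error {
            attempts++
            if attempts == 1 {
                return errors.New("transient: short read") // will be retried
            }
            // wrapping with backoff.Permanent stops the retry loop at once,
            // mirroring how the test loader refuses to retry such errors
            return backoff.Permanent(errors.New("callback rejected blob"))
        }
        err := backoff.Retry(op, backoff.NewExponentialBackOff())
        fmt.Println(attempts, err) // 2 attempts: retried once, then gave up
    }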
17 changes: 16 additions & 1 deletion internal/restorer/filerestorer.go
@@ -246,7 +246,10 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
         return err
     }
 
+    // track already processed blobs for precise error reporting
+    processedBlobs := restic.NewBlobSet()
     err := repository.StreamPack(ctx, r.packLoader, r.key, pack.id, blobList, func(h restic.BlobHandle, blobData []byte, err error) error {
+        processedBlobs.Insert(h)
         blob := blobs[h.ID]
         if err != nil {
             for file := range blob.files {
@@ -292,7 +295,19 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
     })
 
     if err != nil {
-        for file := range pack.files {
+        // only report error for not yet processed blobs
+        affectedFiles := make(map[*fileInfo]struct{})
+        for _, blob := range blobList {
+            if processedBlobs.Has(blob.BlobHandle) {
+                continue
+            }
+            blob := blobs[blob.ID]
+            for file := range blob.files {
+                affectedFiles[file] = struct{}{}
+            }
+        }
+
+        for file := range affectedFiles {
             if errFile := sanitizeError(file, err); errFile != nil {
                 return errFile
             }
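The attribution logic above is essentially a set difference: report the error only for files that still depend on a blob that was never delivered. A runnable sketch of the same idea with plain maps, separate from the diff (all names hypothetical, not restic API):

    package main

    import "fmt"

    // affectedFiles reports only files whose blobs were never delivered.
    // requested lists every blob the restorer wanted from the pack,
    // processed marks blobs that reached the callback before the failure,
    // and fileOf maps each blob to the files referencing it.
    func affectedFiles(requested []string, processed map[string]bool, fileOf map[string][]string) map[string]bool {
        affected := make(map[string]bool)
        for _, blob := range requested {
            if processed[blob] {
                continue // blob already restored; the error does not concern it
            }
            for _, f := range fileOf[blob] {
                affected[f] = true
            }
        }
        return affected
    }

    func main() {
        processed := map[string]bool{"b1": true, "b2": true}
        fileOf := map[string][]string{"b1": {"file1"}, "b2": {"file2"}, "b3": {"file2"}}
        // b3 failed to download, so only file2 is reported
        fmt.Println(affectedFiles([]string{"b1", "b2", "b3"}, processed, fileOf))
    }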
44 changes: 44 additions & 0 deletions internal/restorer/filerestorer_test.go
@@ -317,3 +317,47 @@ func testPartialDownloadError(t *testing.T, part int) {
     rtest.OK(t, err)
     verifyRestore(t, r, repo)
 }
+
+func TestFatalDownloadError(t *testing.T) {
+    tempdir := rtest.TempDir(t)
+    content := []TestFile{
+        {
+            name: "file1",
+            blobs: []TestBlob{
+                {"data1-1", "pack1"},
+                {"data1-2", "pack1"},
+            },
+        },
+        {
+            name: "file2",
+            blobs: []TestBlob{
+                {"data2-1", "pack1"},
+                {"data2-2", "pack1"},
+                {"data2-3", "pack1"},
+            },
+        }}
+
+    repo := newTestRepo(content)
+
+    loader := repo.loader
+    repo.loader = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+        // only return half the data to break file2
+        return loader(ctx, h, length/2, offset, fn)
+    }
+
+    r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false, nil)
+    r.files = repo.files
+
+    var errors []string
+    r.Error = func(s string, e error) error {
+        // ignore errors as in the `restore` command
+        errors = append(errors, s)
+        return nil
+    }
+
+    err := r.restoreFiles(context.TODO())
+    rtest.OK(t, err)
+
+    rtest.Assert(t, len(errors) == 1, "unexpected number of restore errors, expected: 1, got: %v", len(errors))
+    rtest.Assert(t, errors[0] == "file2", "expected error for file2, got: %v", errors[0])
+}

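The test's loader override is a small decorator over the original load function, and the same wrapping pattern can inject other fault modes. A sketch under the assumption that loadFn mirrors the loader signature used in this test (illustrative only, not restic API):

    package faults

    import (
        "context"
        "io"

        "github.com/restic/restic/internal/backend"
    )

    type loadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error

    // truncated wraps an existing loader so that every request yields only
    // half of the requested byte range, simulating a broken download; in the
    // test above this breaks file2 while file1 still restores completely.
    func truncated(inner loadFn) loadFn {
        return func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
            return inner(ctx, h, length/2, offset, fn)
        }
    }

Usage mirrors the test: repo.loader would be replaced by truncated(repo.loader).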