From dca9b6f5dbd28f9c7e7134e7edd0a8eba0233ee2 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Fri, 18 Dec 2020 23:36:45 +0100
Subject: [PATCH 1/4] azure: explicitly pass upload size

Previously, the azure library's fallback was to read the whole blob
into memory just to determine the upload size.
---
 internal/backend/azure/azure.go | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go
index 33162c227..53064702f 100644
--- a/internal/backend/azure/azure.go
+++ b/internal/backend/azure/azure.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/base64"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path"
@@ -118,6 +117,16 @@ func (be *Backend) Path() string {
 	return be.prefix
 }
 
+type azureAdapter struct {
+	restic.RewindReader
+}
+
+func (azureAdapter) Close() error { return nil }
+
+func (a azureAdapter) Len() int {
+	return int(a.Length())
+}
+
 // Save stores data in the backend at the handle.
 func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
 	if err := h.Valid(); err != nil {
@@ -135,7 +144,8 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
 	var err error
 	if rd.Length() < 256*1024*1024 {
 		// wrap the reader so that net/http client cannot close the reader
-		dataReader := ioutil.NopCloser(rd)
+		// CreateBlockBlobFromReader reads the upload length from Len()
+		dataReader := azureAdapter{rd}
 
 		// if it's smaller than 256 MiB, then just create the file directly from the reader
 		err = be.container.GetBlobReference(objName).CreateBlockBlobFromReader(dataReader, nil)

From 4526d5d197acaf6f338694364a2073069f639bc6 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Fri, 18 Dec 2020 23:38:09 +0100
Subject: [PATCH 2/4] swift: explicitly pass upload size to library

This allows properly setting the Content-Length header, which can help
the server side detect incomplete uploads.
---
 internal/backend/swift/swift.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go
index 488ccbd14..92b6567e3 100644
--- a/internal/backend/swift/swift.go
+++ b/internal/backend/swift/swift.go
@@ -6,6 +6,7 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"strconv"
 	"strings"
 	"time"
 
@@ -176,7 +177,9 @@ func (be *beSwift) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
 	encoding := "binary/octet-stream"
 
 	debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding)
-	_, err := be.conn.ObjectPut(be.container, objName, rd, true, "", encoding, nil)
+	hdr := swift.Headers{"Content-Length": strconv.FormatInt(rd.Length(), 10)}
+	_, err := be.conn.ObjectPut(be.container, objName, rd, true, "", encoding, hdr)
+	// swift does not return the upload length
 	debug.Log("%v, err %#v", objName, err)
 
 	return errors.Wrap(err, "client.PutObject")
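
Note on PATCH 1/4 and 2/4: both changes exist because the client libraries
otherwise have to guess the upload size. For Azure, the SDK is assumed to
probe the reader for a Len() int method and to fall back to buffering the
entire blob only when that probe fails; this is also why azureAdapter
declares Len() on the value receiver, since the plain azureAdapter{rd}
value handed to CreateBlockBlobFromReader must satisfy the probe. A
minimal, self-contained sketch of that assumed mechanism (detectSize and
adapter are hypothetical names, not the SDK's actual code):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// detectSize mimics the assumed SDK fallback: use Len() when the reader
// offers it, otherwise buffer the entire stream just to count its bytes.
func detectSize(r io.Reader) (int, error) {
	if lr, ok := r.(interface{ Len() int }); ok {
		return lr.Len(), nil
	}
	data, err := ioutil.ReadAll(r) // worst case: whole blob in memory
	return len(data), err
}

type adapter struct{ *strings.Reader }

// Len is declared on the value receiver so that a plain adapter value
// passed as io.Reader still satisfies the Len() probe above.
func (a adapter) Len() int { return a.Reader.Len() }

func main() {
	n, _ := detectSize(adapter{strings.NewReader("payload")})
	fmt.Println("size known up front:", n) // size known up front: 7
}

With a pointer receiver instead, the plain adapter value would fail the
probe and silently take the buffering path.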

From c73316a111364f45185cd61a1ccc66cea9a85b25 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Fri, 18 Dec 2020 23:41:29 +0100
Subject: [PATCH 3/4] backends: add sanity check for the uploaded file size

Bugs in the error handling while uploading a file to the backend could
cause incomplete files, see e.g. https://github.com/golang/go/issues/42400,
which could affect the local backend.

Proactively add sanity checks that treat an upload as failed if the
reported upload size does not match the actual file size.
---
 internal/backend/azure/azure.go     | 7 +++++++
 internal/backend/b2/b2.go           | 4 ++++
 internal/backend/gs/gs.go           | 4 ++++
 internal/backend/local/local.go     | 7 ++++++-
 internal/backend/mem/mem_backend.go | 5 +++++
 internal/backend/s3/s3.go           | 9 +++++++--
 internal/backend/sftp/sftp.go       | 8 +++++++-
 7 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go
index 53064702f..f0045c8aa 100644
--- a/internal/backend/azure/azure.go
+++ b/internal/backend/azure/azure.go
@@ -172,6 +172,7 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.Rewi
 	// read the data, in 100 MiB chunks
 	buf := make([]byte, 100*1024*1024)
 	var blocks []storage.Block
+	uploadedBytes := 0
 
 	for {
 		n, err := io.ReadFull(rd, buf)
@@ -188,6 +189,7 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.Rewi
 		}
 
 		buf = buf[:n]
+		uploadedBytes += n
 
 		// upload it as a new "block", use the base64 hash for the ID
 		h := restic.Hash(buf)
@@ -204,6 +206,11 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.Rewi
 		})
 	}
 
+	// sanity check
+	if uploadedBytes != int(rd.Length()) {
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
+	}
+
 	debug.Log("uploaded %d parts: %v", len(blocks), blocks)
 	err = file.PutBlockList(blocks, nil)
 	debug.Log("PutBlockList returned %v", err)
diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go
index f6d4331c8..471ff1c62 100644
--- a/internal/backend/b2/b2.go
+++ b/internal/backend/b2/b2.go
@@ -209,6 +209,10 @@ func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd restic.Rewind
 		return errors.Wrap(err, "Copy")
 	}
 
+	// sanity check
+	if n != rd.Length() {
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", n, rd.Length())
+	}
 	return errors.Wrap(w.Close(), "Close")
 }
 
diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go
index 0b0cea8be..7e4ac0702 100644
--- a/internal/backend/gs/gs.go
+++ b/internal/backend/gs/gs.go
@@ -245,6 +245,10 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
 	}
 
 	debug.Log("%v -> %v bytes", objName, wbytes)
+	// sanity check
+	if wbytes != rd.Length() {
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
+	}
 	return nil
 }
 
diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go
index c195bdc07..f1b658864 100644
--- a/internal/backend/local/local.go
+++ b/internal/backend/local/local.go
@@ -118,11 +118,16 @@ func (b *Local) Save(ctx context.Context, h restic.Handle, rd restic.RewindReade
 	}
 
 	// save data, then sync
-	_, err = io.Copy(f, rd)
+	wbytes, err := io.Copy(f, rd)
 	if err != nil {
 		_ = f.Close()
 		return errors.Wrap(err, "Write")
 	}
+	// sanity check
+	if wbytes != rd.Length() {
+		_ = f.Close()
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
+	}
 
 	if err = f.Sync(); err != nil {
 		pathErr, ok := err.(*os.PathError)
diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go
index 8378630fa..950ae122d 100644
--- a/internal/backend/mem/mem_backend.go
+++ b/internal/backend/mem/mem_backend.go
@@ -84,6 +84,11 @@ func (be *MemoryBackend) Save(ctx context.Context, h restic.Handle, rd restic.Re
 	be.data[h] = buf
 	debug.Log("saved %v bytes at %v", len(buf), h)
 
+	// sanity check
+	if int64(len(buf)) != rd.Length() {
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", len(buf), rd.Length())
+	}
+
 	return ctx.Err()
 }
 
diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go
index 83d784813..8a8494623 100644
--- a/internal/backend/s3/s3.go
+++ b/internal/backend/s3/s3.go
@@ -272,9 +272,14 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
 	opts.ContentType = "application/octet-stream"
 
 	debug.Log("PutObject(%v, %v, %v)", be.cfg.Bucket, objName, rd.Length())
-	n, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, ioutil.NopCloser(rd), int64(rd.Length()), opts)
+	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, ioutil.NopCloser(rd), int64(rd.Length()), opts)
 
-	debug.Log("%v -> %v bytes, err %#v: %v", objName, n, err, err)
+	debug.Log("%v -> %v bytes, err %#v: %v", objName, info.Size, err, err)
+
+	// sanity check
+	if err == nil && info.Size != rd.Length() {
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", info.Size, rd.Length())
+	}
 
 	return errors.Wrap(err, "client.PutObject")
 }
diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go
index 7fa4b7319..3d5ee3ba4 100644
--- a/internal/backend/sftp/sftp.go
+++ b/internal/backend/sftp/sftp.go
@@ -288,12 +288,18 @@ func (r *SFTP) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader
 	}
 
 	// save data, make sure to use the optimized sftp upload method
-	_, err = f.ReadFrom(rd)
+	wbytes, err := f.ReadFrom(rd)
 	if err != nil {
 		_ = f.Close()
 		return errors.Wrap(err, "Write")
 	}
 
+	// sanity check
+	if wbytes != rd.Length() {
+		_ = f.Close()
+		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
+	}
+
 	err = f.Close()
 	if err != nil {
 		return errors.Wrap(err, "Close")
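
Note on PATCH 3/4: every backend above applies the same comparison.
Distilled into a standalone helper for illustration (checkUploadSize is a
hypothetical name; the patch inlines the comparison so that, for example,
the local and sftp backends can still close their file handle before
returning):

package main

import "fmt"

// checkUploadSize treats an upload as failed when the byte count reported
// by the client library differs from the length announced by the reader.
func checkUploadSize(expected, written int64) error {
	if written != expected {
		return fmt.Errorf("wrote %d bytes instead of the expected %d bytes", written, expected)
	}
	return nil
}

func main() {
	fmt.Println(checkUploadSize(100, 100)) // <nil>
	fmt.Println(checkUploadSize(100, 42))  // wrote 42 bytes instead of the expected 100 bytes
}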

From 1f583b3d8e5c8029636527305dbdc06024cecda0 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Fri, 18 Dec 2020 23:53:15 +0100
Subject: [PATCH 4/4] backend: test that incomplete uploads fail

---
 internal/backend/test/tests.go | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go
index 92a63fba2..a35b75e5a 100644
--- a/internal/backend/test/tests.go
+++ b/internal/backend/test/tests.go
@@ -557,6 +557,38 @@ func (s *Suite) TestSave(t *testing.T) {
 	}
 }
 
+type incompleteByteReader struct {
+	restic.ByteReader
+}
+
+func (r *incompleteByteReader) Length() int64 {
+	return r.ByteReader.Length() + 42
+}
+
+// TestSaveError tests that saving fails when the reader reports a wrong length.
+func (s *Suite) TestSaveError(t *testing.T) {
+	seedRand(t)
+
+	b := s.open(t)
+	defer func() {
+		// rclone will report an error when closing the backend. We have to
+		// ignore it, otherwise this test will always fail.
+		_ = b.Close()
+	}()
+
+	length := rand.Intn(1<<23) + 200000
+	data := test.Random(23, length)
+	var id restic.ID
+	copy(id[:], data)
+
+	// test that incomplete uploads fail
+	h := restic.Handle{Type: restic.PackFile, Name: id.String()}
+	err := b.Save(context.TODO(), h, &incompleteByteReader{ByteReader: *restic.NewByteReader(data)})
+	if err == nil {
+		t.Fatal("incomplete upload did not fail")
+	}
+}
+
 var filenameTests = []struct {
 	name string
 	data string
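
Note on PATCH 4/4: the test works by announcing 42 more bytes than the
stream actually contains, so every sanity check from PATCH 3/4 must trip.
The interaction in isolation, with stand-ins for restic.ByteReader and the
in-memory backend (all names here are hypothetical):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// rewindReader mirrors the relevant slice of restic.RewindReader: a byte
// stream plus the length the backend should expect.
type rewindReader interface {
	io.Reader
	Length() int64
}

type byteReader struct{ *bytes.Reader }

func (b byteReader) Length() int64 { return b.Reader.Size() }

// incompleteByteReader overrides only Length(), like the test helper: the
// data stream stays intact, but the announced size is 42 bytes too large.
type incompleteByteReader struct{ byteReader }

func (r incompleteByteReader) Length() int64 { return r.byteReader.Length() + 42 }

// save mimics the in-memory backend after PATCH 3/4: read everything, then
// verify the byte count against the announced length.
func save(rd rewindReader) error {
	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}
	if int64(len(buf)) != rd.Length() {
		return fmt.Errorf("wrote %d bytes instead of the expected %d bytes", len(buf), rd.Length())
	}
	return nil
}

func main() {
	data := []byte("pack file contents")
	fmt.Println(save(byteReader{bytes.NewReader(data)}))                       // <nil>
	fmt.Println(save(incompleteByteReader{byteReader{bytes.NewReader(data)}})) // wrote 18 bytes instead of the expected 60 bytes
}

Embedding the real reader and overriding only Length() keeps the data
itself valid, which makes the announced size, not the payload, the thing
under test.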