diff --git a/internal/archiver/archive_reader.go b/internal/archiver/archive_reader.go deleted file mode 100644 index fa00a7406..000000000 --- a/internal/archiver/archive_reader.go +++ /dev/null @@ -1,117 +0,0 @@ -package archiver - -import ( - "context" - "io" - "time" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/restic" - - "github.com/restic/restic/internal/errors" - - "github.com/restic/chunker" -) - -// Reader allows saving a stream of data to the repository. -type Reader struct { - restic.Repository - - Tags []string - Hostname string - TimeStamp time.Time -} - -// Archive reads data from the reader and saves it to the repo. -func (r *Reader) Archive(ctx context.Context, name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) { - if name == "" { - return nil, restic.ID{}, errors.New("no filename given") - } - debug.Log("start archiving %s", name) - sn, err := restic.NewSnapshot([]string{name}, r.Tags, r.Hostname, r.TimeStamp) - if err != nil { - return nil, restic.ID{}, err - } - - p.Start() - defer p.Done() - - repo := r.Repository - chnker := chunker.New(rd, repo.Config().ChunkerPolynomial) - - ids := restic.IDs{} - var fileSize uint64 - - for { - chunk, err := chnker.Next(getBuf()) - if errors.Cause(err) == io.EOF { - break - } - - if err != nil { - return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()") - } - - id := restic.Hash(chunk.Data) - - if !repo.Index().Has(id, restic.DataBlob) { - _, err := repo.SaveBlob(ctx, restic.DataBlob, chunk.Data, id) - if err != nil { - return nil, restic.ID{}, err - } - debug.Log("saved blob %v (%d bytes)\n", id, chunk.Length) - } else { - debug.Log("blob %v already saved in the repo\n", id) - } - - freeBuf(chunk.Data) - - ids = append(ids, id) - - p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) - fileSize += uint64(chunk.Length) - } - - tree := &restic.Tree{ - Nodes: []*restic.Node{ - { - Name: name, - AccessTime: time.Now(), - ModTime: time.Now(), - Type: "file", - Mode: 0644, - Size: fileSize, - UID: sn.UID, - GID: sn.GID, - User: sn.Username, - Content: ids, - }, - }, - } - - treeID, err := repo.SaveTree(ctx, tree) - if err != nil { - return nil, restic.ID{}, err - } - sn.Tree = &treeID - debug.Log("tree saved as %v", treeID) - - id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) - if err != nil { - return nil, restic.ID{}, err - } - - debug.Log("snapshot saved as %v", id) - - err = repo.Flush(ctx) - if err != nil { - return nil, restic.ID{}, err - } - - err = repo.SaveIndex(ctx) - if err != nil { - return nil, restic.ID{}, err - } - - return sn, id, nil -} diff --git a/internal/archiver/archive_reader_test.go b/internal/archiver/archive_reader_test.go deleted file mode 100644 index d0fdb06cf..000000000 --- a/internal/archiver/archive_reader_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package archiver - -import ( - "bytes" - "context" - "errors" - "io" - "math/rand" - "testing" - - "github.com/restic/restic/internal/checker" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" -) - -func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int { - n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) - if err != nil { - t.Fatalf("LoadBlob(%v) returned error %v", id, err) - } - - return n -} - -func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) { - tree, err := repo.LoadTree(context.TODO(), treeID) - if err != nil { - t.Fatalf("LoadTree() 
returned error %v", err) - } - - if len(tree.Nodes) != 1 { - t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes)) - } - - node := tree.Nodes[0] - if node.Name != "fakefile" { - t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name) - } - - if len(node.Content) == 0 { - t.Fatalf("node.Content has length 0") - } - - // check blobs - for i, id := range node.Content { - size, found := repo.LookupBlobSize(id, restic.DataBlob) - if !found { - t.Fatal("Failed to find blob", id.Str()) - } - - buf := restic.NewBlobBuffer(int(size)) - n := loadBlob(t, repo, id, buf) - if n != len(buf) { - t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n) - } - - buf2 := make([]byte, int(size)) - _, err := io.ReadFull(rd, buf2) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(buf, buf2) { - t.Fatalf("blob %d (%v) is wrong", i, id.Str()) - } - } -} - -// fakeFile returns a reader which yields deterministic pseudo-random data. -func fakeFile(t testing.TB, seed, size int64) io.Reader { - return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size) -} - -func TestArchiveReader(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - seed := rand.Int63() - size := int64(rand.Intn(50*1024*1024) + 50*1024*1024) - t.Logf("seed is 0x%016x, size is %v", seed, size) - - f := fakeFile(t, seed, size) - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - sn, id, err := r.Archive(context.TODO(), "fakefile", f, nil) - if err != nil { - t.Fatalf("ArchiveReader() returned error %v", err) - } - - if id.IsNull() { - t.Fatalf("ArchiveReader() returned null ID") - } - - t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str()) - - checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size)) - - checker.TestCheckRepo(t, repo) -} - -func TestArchiveReaderNull(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - sn, id, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(nil), nil) - if err != nil { - t.Fatalf("ArchiveReader() returned error %v", err) - } - - if id.IsNull() { - t.Fatalf("ArchiveReader() returned null ID") - } - - t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str()) - - checker.TestCheckRepo(t, repo) -} - -type errReader string - -func (e errReader) Read([]byte) (int, error) { - return 0, errors.New(string(e)) -} - -func countSnapshots(t testing.TB, repo restic.Repository) int { - snapshots := 0 - err := repo.List(context.TODO(), restic.SnapshotFile, func(id restic.ID, size int64) error { - snapshots++ - return nil - }) - if err != nil { - t.Fatal(err) - } - return snapshots -} - -func TestArchiveReaderError(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - sn, id, err := r.Archive(context.TODO(), "fakefile", errReader("error returned by reading stdin"), nil) - if err == nil { - t.Errorf("expected error not returned") - } - - if sn != nil { - t.Errorf("Snapshot should be nil, but isn't") - } - - if !id.IsNull() { - t.Errorf("id should be null, but %v returned", id.Str()) - } - - n := countSnapshots(t, repo) - if n > 0 { - t.Errorf("expected zero snapshots, but got %d", n) - } - - checker.TestCheckRepo(t, repo) -} - -func BenchmarkArchiveReader(t *testing.B) { - repo, cleanup 
:= repository.TestRepository(t) - defer cleanup() - - const size = 50 * 1024 * 1024 - - buf := make([]byte, size) - _, err := io.ReadFull(fakeFile(t, 23, size), buf) - if err != nil { - t.Fatal(err) - } - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - t.SetBytes(size) - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, _, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(buf), nil) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go deleted file mode 100644 index 55024b53a..000000000 --- a/internal/archiver/archiver.go +++ /dev/null @@ -1,877 +0,0 @@ -package archiver - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "sync" - "time" - - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/walk" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/pipe" - - "github.com/restic/chunker" -) - -const ( - maxConcurrentBlobs = 32 - maxConcurrency = 10 -) - -var archiverPrintWarnings = func(path string, fi os.FileInfo, err error) { - fmt.Fprintf(os.Stderr, "warning for %v: %v", path, err) -} -var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true } - -// Archiver is used to backup a set of directories. -type Archiver struct { - repo restic.Repository - knownBlobs struct { - restic.IDSet - sync.Mutex - } - - blobToken chan struct{} - - Warn func(dir string, fi os.FileInfo, err error) - SelectFilter pipe.SelectFunc - Excludes []string - - WithAccessTime bool -} - -// New returns a new archiver. -func New(repo restic.Repository) *Archiver { - arch := &Archiver{ - repo: repo, - blobToken: make(chan struct{}, maxConcurrentBlobs), - knownBlobs: struct { - restic.IDSet - sync.Mutex - }{ - IDSet: restic.NewIDSet(), - }, - } - - for i := 0; i < maxConcurrentBlobs; i++ { - arch.blobToken <- struct{}{} - } - - arch.Warn = archiverPrintWarnings - arch.SelectFilter = archiverAllowAllFiles - - return arch -} - -// isKnownBlob returns true iff the blob is not yet in the list of known blobs. -// When the blob is not known, false is returned and the blob is added to the -// list. This means that the caller false is returned to is responsible to save -// the blob to the backend. -func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool { - arch.knownBlobs.Lock() - defer arch.knownBlobs.Unlock() - - if arch.knownBlobs.Has(id) { - return true - } - - arch.knownBlobs.Insert(id) - - if arch.repo.Index().Has(id, t) { - return true - } - - return false -} - -// Save stores a blob read from rd in the repository. -func (arch *Archiver) Save(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error { - debug.Log("Save(%v, %v)\n", t, id) - - if arch.isKnownBlob(id, restic.DataBlob) { - debug.Log("blob %v is known\n", id) - return nil - } - - _, err := arch.repo.SaveBlob(ctx, t, data, id) - if err != nil { - debug.Log("Save(%v, %v): error %v\n", t, id, err) - return err - } - - debug.Log("Save(%v, %v): new blob\n", t, id) - return nil -} - -// SaveTreeJSON stores a tree in the repository. 
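Save and isKnownBlob above implement content-addressed deduplication: hash the data, check a mutex-guarded in-memory set plus the repository index, and upload only on a miss. A minimal, self-contained sketch of that shape, assuming a plain map as the backend (dedupStore and its names are illustrative, not restic's API):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

// dedupStore is a toy content-addressed store: the key is the SHA-256
// of the data, so saving identical bytes twice stores them only once.
type dedupStore struct {
	mu    sync.Mutex
	blobs map[[32]byte][]byte
}

// save returns the blob's ID and whether this call actually stored it.
func (s *dedupStore) save(data []byte) (id [32]byte, stored bool) {
	id = sha256.Sum256(data)

	s.mu.Lock()
	defer s.mu.Unlock()
	if _, known := s.blobs[id]; known {
		return id, false // already present, skip the upload
	}
	s.blobs[id] = append([]byte(nil), data...)
	return id, true
}

func main() {
	s := &dedupStore{blobs: make(map[[32]byte][]byte)}
	id1, stored1 := s.save([]byte("chunk data"))
	id2, stored2 := s.save([]byte("chunk data"))
	fmt.Printf("same id: %v, stored: %v then %v\n", id1 == id2, stored1, stored2)
}
```

Note that the real isKnownBlob inserts the ID into its set before consulting the index, so a second concurrent save of the same blob is treated as known even while the first is still uploading.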
-func (arch *Archiver) SaveTreeJSON(ctx context.Context, tree *restic.Tree) (restic.ID, error) { - data, err := json.Marshal(tree) - if err != nil { - return restic.ID{}, errors.Wrap(err, "Marshal") - } - data = append(data, '\n') - - // check if tree has been saved before - id := restic.Hash(data) - if arch.isKnownBlob(id, restic.TreeBlob) { - return id, nil - } - - return arch.repo.SaveBlob(ctx, restic.TreeBlob, data, id) -} - -func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) { - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - - fi, err := file.Stat() - if err != nil { - return nil, errors.Wrap(err, "restic.Stat") - } - - if fi.ModTime().Equal(node.ModTime) { - return node, nil - } - - arch.Warn(node.Path, fi, errors.New("file has changed")) - - node, err = restic.NodeFromFileInfo(node.Path, fi) - if err != nil { - debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) - arch.Warn(node.Path, fi, err) - } - - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - - return node, nil -} - -type saveResult struct { - id restic.ID - bytes uint64 -} - -func (arch *Archiver) saveChunk(ctx context.Context, chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { - defer freeBuf(chunk.Data) - - id := restic.Hash(chunk.Data) - err := arch.Save(ctx, restic.DataBlob, chunk.Data, id) - // TODO handle error - if err != nil { - debug.Log("Save(%v) failed: %v", id, err) - fmt.Printf("\nerror while saving data to the repo: %+v\n", err) - panic(err) - } - - p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) - arch.blobToken <- token - resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)} -} - -func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error) { - results := []saveResult{} - - for _, ch := range resultChannels { - results = append(results, <-ch) - } - - if len(results) != len(resultChannels) { - return nil, errors.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results)) - } - - return results, nil -} - -func updateNodeContent(node *restic.Node, results []saveResult) error { - debug.Log("checking size for file %s", node.Path) - - var bytes uint64 - node.Content = make([]restic.ID, len(results)) - - for i, b := range results { - node.Content[i] = b.id - bytes += b.bytes - - debug.Log(" adding blob %s, %d bytes", b.id, b.bytes) - } - - if bytes != node.Size { - fmt.Fprintf(os.Stderr, "warning for %v: expected %d bytes, saved %d bytes\n", node.Path, node.Size, bytes) - } - - debug.Log("SaveFile(%q): %v blobs\n", node.Path, len(results)) - - return nil -} - -// SaveFile stores the content of the file on the backend as a Blob by calling -// Save for each chunk. 
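saveChunk above is throttled with a counting semaphore rather than a worker pool: New fills blobToken with maxConcurrentBlobs empty structs, the caller takes a token before spawning each chunk goroutine, and the goroutine hands it back once the blob is saved. The idiom in isolation (a sketch; sizes and names are stand-ins):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxInFlight = 4

	// Pre-fill the channel with tokens; a goroutine may only start once
	// it holds a token, capping concurrency at maxInFlight.
	tokens := make(chan struct{}, maxInFlight)
	for i := 0; i < maxInFlight; i++ {
		tokens <- struct{}{}
	}

	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		token := <-tokens // blocks while maxInFlight jobs are running
		wg.Add(1)
		go func(i int, token struct{}) {
			defer wg.Done()
			fmt.Println("saving chunk", i)
			tokens <- token // hand the token back for the next job
		}(i, token)
	}
	wg.Wait()
}
```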
-func (arch *Archiver) SaveFile(ctx context.Context, p *restic.Progress, node *restic.Node) (*restic.Node, error) { - file, err := fs.Open(node.Path) - if err != nil { - return node, errors.Wrap(err, "Open") - } - defer file.Close() - - debug.RunHook("archiver.SaveFile", node.Path) - - node, err = arch.reloadFileIfChanged(node, file) - if err != nil { - return node, err - } - - chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial) - resultChannels := [](<-chan saveResult){} - - for { - chunk, err := chnker.Next(getBuf()) - if errors.Cause(err) == io.EOF { - break - } - - if err != nil { - return node, errors.Wrap(err, "chunker.Next") - } - - resCh := make(chan saveResult, 1) - go arch.saveChunk(ctx, chunk, p, <-arch.blobToken, file, resCh) - resultChannels = append(resultChannels, resCh) - } - - results, err := waitForResults(resultChannels) - if err != nil { - return node, err - } - err = updateNodeContent(node, results) - - return node, err -} - -func (arch *Archiver) fileWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, entCh <-chan pipe.Entry) { - defer func() { - debug.Log("done") - wg.Done() - }() - for { - select { - case e, ok := <-entCh: - if !ok { - // channel is closed - return - } - - debug.Log("got job %v", e) - - // check for errors - if e.Error() != nil { - debug.Log("job %v has errors: %v", e.Path(), e.Error()) - // TODO: integrate error reporting - fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error()) - // ignore this file - e.Result() <- nil - p.Report(restic.Stat{Errors: 1}) - continue - } - - node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info()) - if err != nil { - debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) - arch.Warn(e.Fullpath(), e.Info(), err) - } - - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - - // try to use old node, if present - if e.Node != nil { - debug.Log(" %v use old data", e.Path()) - - oldNode := e.Node.(*restic.Node) - // check if all content is still available in the repository - contentMissing := false - for _, blob := range oldNode.Content { - if !arch.repo.Index().Has(blob, restic.DataBlob) { - debug.Log(" %v not using old data, %v is missing", e.Path(), blob) - contentMissing = true - break - } - } - - if !contentMissing { - node.Content = oldNode.Content - debug.Log(" %v content is complete", e.Path()) - } - } else { - debug.Log(" %v no old data", e.Path()) - } - - // otherwise read file normally - if node.Type == "file" && len(node.Content) == 0 { - debug.Log(" read and save %v", e.Path()) - node, err = arch.SaveFile(ctx, p, node) - if err != nil { - fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err) - arch.Warn(e.Path(), nil, err) - // ignore this file - e.Result() <- nil - p.Report(restic.Stat{Errors: 1}) - continue - } - } else { - // report old data size - p.Report(restic.Stat{Bytes: node.Size}) - } - - debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content)) - e.Result() <- node - p.Report(restic.Stat{Files: 1}) - case <-ctx.Done(): - // pipeline was cancelled - return - } - } -} - -func (arch *Archiver) dirWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, dirCh <-chan pipe.Dir) { - debug.Log("start") - defer func() { - debug.Log("done") - wg.Done() - }() - for { - select { - case dir, ok := <-dirCh: - if !ok { - // channel is closed - return - } - debug.Log("save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error()) - - // ignore dir nodes with errors - if dir.Error() != nil { - 
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error()) - dir.Result() <- nil - p.Report(restic.Stat{Errors: 1}) - continue - } - - tree := restic.NewTree() - - // wait for all content - for _, ch := range dir.Entries { - debug.Log("receiving result from %v", ch) - res := <-ch - - // if we get a nil pointer here, an error has happened while - // processing this entry. Ignore it for now. - if res == nil { - debug.Log("got nil result?") - continue - } - - // else insert node - node := res.(*restic.Node) - - if node.Type == "dir" { - debug.Log("got tree node for %s: %v", node.Path, node.Subtree) - - if node.Subtree == nil { - debug.Log("subtree is nil for node %v", node.Path) - continue - } - - if node.Subtree.IsNull() { - panic("invalid null subtree restic.ID") - } - } - - // insert node into tree, resolve name collisions - name := node.Name - i := 0 - for { - i++ - err := tree.Insert(node) - if err == nil { - break - } - - newName := fmt.Sprintf("%v-%d", name, i) - fmt.Fprintf(os.Stderr, "%v: name collision for %q, renaming to %q\n", filepath.Dir(node.Path), node.Name, newName) - node.Name = newName - } - - } - - node := &restic.Node{} - - if dir.Path() != "" && dir.Info() != nil { - n, err := restic.NodeFromFileInfo(dir.Fullpath(), dir.Info()) - if err != nil { - arch.Warn(dir.Path(), dir.Info(), err) - } - node = n - - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - } - - if err := dir.Error(); err != nil { - node.Error = err.Error() - } - - id, err := arch.SaveTreeJSON(ctx, tree) - if err != nil { - panic(err) - } - debug.Log("save tree for %s: %v", dir.Path(), id) - if id.IsNull() { - panic("invalid null subtree restic.ID return from SaveTreeJSON()") - } - - node.Subtree = &id - - debug.Log("sending result to %v", dir.Result()) - - dir.Result() <- node - if dir.Path() != "" { - p.Report(restic.Stat{Dirs: 1}) - } - case <-ctx.Done(): - // pipeline was cancelled - return - } - } -} - -type archivePipe struct { - Old <-chan walk.TreeJob - New <-chan pipe.Job -} - -func copyJobs(ctx context.Context, in <-chan pipe.Job, out chan<- pipe.Job) { - var ( - // disable sending on the outCh until we received a job - outCh chan<- pipe.Job - // enable receiving from in - inCh = in - job pipe.Job - ok bool - ) - - for { - select { - case <-ctx.Done(): - return - case job, ok = <-inCh: - if !ok { - // input channel closed, we're done - debug.Log("input channel closed, we're done") - return - } - inCh = nil - outCh = out - case outCh <- job: - outCh = nil - inCh = in - } - } -} - -type archiveJob struct { - hasOld bool - old walk.TreeJob - new pipe.Job -} - -func (a *archivePipe) compare(ctx context.Context, out chan<- pipe.Job) { - defer func() { - close(out) - debug.Log("done") - }() - - debug.Log("start") - var ( - loadOld, loadNew bool = true, true - ok bool - oldJob walk.TreeJob - newJob pipe.Job - ) - - for { - if loadOld { - oldJob, ok = <-a.Old - // if the old channel is closed, just pass through the new jobs - if !ok { - debug.Log("old channel is closed, copy from new channel") - - // handle remaining newJob - if !loadNew { - out <- archiveJob{new: newJob}.Copy() - } - - copyJobs(ctx, a.New, out) - return - } - - loadOld = false - } - - if loadNew { - newJob, ok = <-a.New - // if the new channel is closed, there are no more files in the current snapshot, return - if !ok { - debug.Log("new channel is closed, we're done") - return - } - - loadNew = false - } - - debug.Log("old job: %v", oldJob.Path) - debug.Log("new job: %v", newJob.Path()) - - // at this 
point we have received an old job as well as a new job, compare paths - file1 := oldJob.Path - file2 := newJob.Path() - - dir1 := filepath.Dir(file1) - dir2 := filepath.Dir(file2) - - if file1 == file2 { - debug.Log(" same filename %q", file1) - - // send job - out <- archiveJob{hasOld: true, old: oldJob, new: newJob}.Copy() - loadOld = true - loadNew = true - continue - } else if dir1 < dir2 { - debug.Log(" %q < %q, file %q added", dir1, dir2, file2) - // file is new, send new job and load new - loadNew = true - out <- archiveJob{new: newJob}.Copy() - continue - } else if dir1 == dir2 { - if file1 < file2 { - debug.Log(" %q < %q, file %q removed", file1, file2, file1) - // file has been removed, load new old - loadOld = true - continue - } else { - debug.Log(" %q > %q, file %q added", file1, file2, file2) - // file is new, send new job and load new - loadNew = true - out <- archiveJob{new: newJob}.Copy() - continue - } - } - - debug.Log(" %q > %q, file %q removed", file1, file2, file1) - // file has been removed, throw away old job and load new - loadOld = true - } -} - -func (j archiveJob) Copy() pipe.Job { - if !j.hasOld { - return j.new - } - - // handle files - if isRegularFile(j.new.Info()) { - debug.Log(" job %v is file", j.new.Path()) - - // if type has changed, return new job directly - if j.old.Node == nil { - return j.new - } - - // if file is newer, return the new job - if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) { - debug.Log(" job %v is newer", j.new.Path()) - return j.new - } - - debug.Log(" job %v add old data", j.new.Path()) - // otherwise annotate job with old data - e := j.new.(pipe.Entry) - e.Node = j.old.Node - return e - } - - // dirs and other types are just returned - return j.new -} - -const saveIndexTime = 30 * time.Second - -// saveIndexes regularly queries the master index for full indexes and saves them. -func (arch *Archiver) saveIndexes(saveCtx, shutdownCtx context.Context, wg *sync.WaitGroup) { - defer wg.Done() - - ticker := time.NewTicker(saveIndexTime) - defer ticker.Stop() - - for { - select { - case <-saveCtx.Done(): - return - case <-shutdownCtx.Done(): - return - case <-ticker.C: - debug.Log("saving full indexes") - err := arch.repo.SaveFullIndex(saveCtx) - if err != nil { - debug.Log("save indexes returned an error: %v", err) - fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err) - } - } - } -} - -// unique returns a slice that only contains unique strings. -func unique(items []string) []string { - seen := make(map[string]struct{}) - for _, item := range items { - seen[item] = struct{}{} - } - - items = items[:0] - for item := range seen { - items = append(items, item) - } - return items -} - -// baseNameSlice allows sorting paths by basename. -// -// Snapshots have contents sorted by basename, but we receive full paths. -// For the archivePipe to advance them in pairs, we traverse the given -// paths in the same order as the snapshot. -type baseNameSlice []string - -func (p baseNameSlice) Len() int { return len(p) } -func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) } -func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is -// used to compare the files to the ones archived at the time this snapshot was -// taken. 
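compare above is a merge of two sorted streams: the parent snapshot's entries and the fresh filesystem walk. Equal paths pair an old job with a new one (so unchanged content can be reused), paths only on the new side are additions, and paths only on the old side are deletions. The core logic without channels or the directory-level ordering (a simplification of the real comparison):

```go
package main

import "fmt"

// mergeSorted pairs two sorted path lists the way archivePipe.compare
// pairs old (parent snapshot) and new (filesystem) jobs: matches can
// reuse old data, cur-only paths are additions, prev-only removals.
func mergeSorted(prev, cur []string, onMatch, onAdd, onRemove func(string)) {
	i, j := 0, 0
	for i < len(prev) && j < len(cur) {
		switch {
		case prev[i] == cur[j]:
			onMatch(cur[j])
			i++
			j++
		case prev[i] < cur[j]:
			onRemove(prev[i]) // existed before, gone now
			i++
		default:
			onAdd(cur[j]) // new path, nothing old to reuse
			j++
		}
	}
	for ; i < len(prev); i++ {
		onRemove(prev[i])
	}
	for ; j < len(cur); j++ {
		onAdd(cur[j])
	}
}

func main() {
	prev := []string{"a/1", "a/2", "b/1"}
	cur := []string{"a/1", "a/3", "b/1", "c/1"}
	mergeSorted(prev, cur,
		func(p string) { fmt.Println("match  ", p) },
		func(p string) { fmt.Println("added  ", p) },
		func(p string) { fmt.Println("removed", p) })
}
```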
-func (arch *Archiver) Snapshot(ctx context.Context, p *restic.Progress, paths, tags []string, hostname string, parentID *restic.ID, time time.Time) (*restic.Snapshot, restic.ID, error) { - paths = unique(paths) - sort.Sort(baseNameSlice(paths)) - - debug.Log("start for %v", paths) - - debug.RunHook("Archiver.Snapshot", nil) - - // signal the whole pipeline to stop - var err error - - p.Start() - defer p.Done() - - // create new snapshot - sn, err := restic.NewSnapshot(paths, tags, hostname, time) - if err != nil { - return nil, restic.ID{}, err - } - sn.Excludes = arch.Excludes - - // make paths absolute - for i, path := range paths { - if p, err := filepath.Abs(path); err == nil { - paths[i] = p - } - } - - jobs := archivePipe{} - - // use parent snapshot (if some was given) - if parentID != nil { - sn.Parent = parentID - - // load parent snapshot - parent, err := restic.LoadSnapshot(ctx, arch.repo, *parentID) - if err != nil { - return nil, restic.ID{}, err - } - - // start walker on old tree - ch := make(chan walk.TreeJob) - go walk.Tree(ctx, arch.repo, *parent.Tree, ch) - jobs.Old = ch - } else { - // use closed channel - ch := make(chan walk.TreeJob) - close(ch) - jobs.Old = ch - } - - // start walker - pipeCh := make(chan pipe.Job) - resCh := make(chan pipe.Result, 1) - go func() { - pipe.Walk(ctx, paths, arch.SelectFilter, pipeCh, resCh) - debug.Log("pipe.Walk done") - }() - jobs.New = pipeCh - - ch := make(chan pipe.Job) - go jobs.compare(ctx, ch) - - var wg sync.WaitGroup - entCh := make(chan pipe.Entry) - dirCh := make(chan pipe.Dir) - - // split - wg.Add(1) - go func() { - pipe.Split(ch, dirCh, entCh) - debug.Log("split done") - close(dirCh) - close(entCh) - wg.Done() - }() - - // run workers - for i := 0; i < maxConcurrency; i++ { - wg.Add(2) - go arch.fileWorker(ctx, &wg, p, entCh) - go arch.dirWorker(ctx, &wg, p, dirCh) - } - - // run index saver - var wgIndexSaver sync.WaitGroup - shutdownCtx, indexShutdown := context.WithCancel(ctx) - wgIndexSaver.Add(1) - go arch.saveIndexes(ctx, shutdownCtx, &wgIndexSaver) - - // wait for all workers to terminate - debug.Log("wait for workers") - wg.Wait() - - // stop index saver - indexShutdown() - wgIndexSaver.Wait() - - debug.Log("workers terminated") - - // flush repository - err = arch.repo.Flush(ctx) - if err != nil { - return nil, restic.ID{}, err - } - - // receive the top-level tree - root := (<-resCh).(*restic.Node) - debug.Log("root node received: %v", root.Subtree) - sn.Tree = root.Subtree - - // load top-level tree again to see if it is empty - toptree, err := arch.repo.LoadTree(ctx, *root.Subtree) - if err != nil { - return nil, restic.ID{}, err - } - - if len(toptree.Nodes) == 0 { - return nil, restic.ID{}, errors.Fatal("no files/dirs saved, refusing to create empty snapshot") - } - - // save index - err = arch.repo.SaveIndex(ctx) - if err != nil { - debug.Log("error saving index: %v", err) - return nil, restic.ID{}, err - } - - debug.Log("saved indexes") - - // save snapshot - id, err := arch.repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) - if err != nil { - return nil, restic.ID{}, err - } - - debug.Log("saved snapshot %v", id) - - return sn, id, nil -} - -func isRegularFile(fi os.FileInfo) bool { - if fi == nil { - return false - } - - return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 -} - -// Scan traverses the dirs to collect restic.Stat information while emitting progress -// information with p. 
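Snapshot above runs saveIndexes concurrently with the workers so that long backups persist preliminary indexes every saveIndexTime; a second, dedicated shutdown context lets Snapshot stop the saver once the workers finish, ahead of the final SaveIndex. A minimal sketch of that two-context ticker loop (interval and names are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// periodicFlush mirrors the index-saver shape above: a ticker fires at
// a fixed interval until either the main context or the dedicated
// shutdown context is cancelled, so the caller can stop the saver
// before performing the final full flush itself.
func periodicFlush(ctx, shutdown context.Context, wg *sync.WaitGroup, interval time.Duration, flush func()) {
	defer wg.Done()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-shutdown.Done():
			return
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	ctx := context.Background()
	shutdownCtx, shutdown := context.WithCancel(ctx)

	var wg sync.WaitGroup
	wg.Add(1)
	go periodicFlush(ctx, shutdownCtx, &wg, 100*time.Millisecond, func() {
		fmt.Println("saving preliminary index")
	})

	time.Sleep(350 * time.Millisecond) // stand-in for the real backup work
	shutdown()                         // stop the saver before the final flush
	wg.Wait()
	fmt.Println("final index save")
}
```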
-func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) { - p.Start() - defer p.Done() - - var stat restic.Stat - - for _, dir := range dirs { - debug.Log("Start for %v", dir) - err := fs.Walk(dir, func(str string, fi os.FileInfo, err error) error { - // TODO: integrate error reporting - if err != nil { - fmt.Fprintf(os.Stderr, "error for %v: %v\n", str, err) - return nil - } - if fi == nil { - fmt.Fprintf(os.Stderr, "error for %v: FileInfo is nil\n", str) - return nil - } - - if !filter(str, fi) { - debug.Log("path %v excluded", str) - if fi.IsDir() { - return filepath.SkipDir - } - return nil - } - - s := restic.Stat{} - if fi.IsDir() { - s.Dirs++ - } else { - s.Files++ - - if isRegularFile(fi) { - s.Bytes += uint64(fi.Size()) - } - } - - p.Report(s) - stat.Add(s) - - // TODO: handle error? - return nil - }) - - debug.Log("Done for %v, err: %v", dir, err) - if err != nil { - return restic.Stat{}, errors.Wrap(err, "fs.Walk") - } - } - - return stat, nil -} diff --git a/internal/archiver/archiver_int_test.go b/internal/archiver/archiver_int_test.go deleted file mode 100644 index b1273fee9..000000000 --- a/internal/archiver/archiver_int_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package archiver - -import ( - "context" - "os" - "testing" - - "github.com/restic/restic/internal/pipe" - "github.com/restic/restic/internal/walk" -) - -var treeJobs = []string{ - "foo/baz/subdir", - "foo/baz", - "foo", - "quu/bar/file1", - "quu/bar/file2", - "quu/foo/file1", - "quu/foo/file2", - "quu/foo/file3", - "quu/foo", - "quu/fooz", - "quu", - "yy/a", - "yy/b", - "yy", -} - -var pipeJobs = []string{ - "foo/baz/subdir", - "foo/baz/subdir2", // subdir2 added - "foo/baz", - "foo", - "quu/bar/.file1.swp", // file with . added - "quu/bar/file1", - "quu/bar/file2", - "quu/foo/file1", // file2 removed - "quu/foo/file3", - "quu/foo", - "quu", - "quv/file1", // files added and removed - "quv/file2", - "quv", - "yy", - "zz/file1", // files removed and added at the end - "zz/file2", - "zz", -} - -var resultJobs = []struct { - path string - action string -}{ - {"foo/baz/subdir", "same, not a file"}, - {"foo/baz/subdir2", "new, no old job"}, - {"foo/baz", "same, not a file"}, - {"foo", "same, not a file"}, - {"quu/bar/.file1.swp", "new, no old job"}, - {"quu/bar/file1", "same, not a file"}, - {"quu/bar/file2", "same, not a file"}, - {"quu/foo/file1", "same, not a file"}, - {"quu/foo/file3", "same, not a file"}, - {"quu/foo", "same, not a file"}, - {"quu", "same, not a file"}, - {"quv/file1", "new, no old job"}, - {"quv/file2", "new, no old job"}, - {"quv", "new, no old job"}, - {"yy", "same, not a file"}, - {"zz/file1", "testPipeJob"}, - {"zz/file2", "testPipeJob"}, - {"zz", "testPipeJob"}, -} - -type testPipeJob struct { - path string - err error - fi os.FileInfo - res chan<- pipe.Result -} - -func (j testPipeJob) Path() string { return j.path } -func (j testPipeJob) Fullpath() string { return j.path } -func (j testPipeJob) Error() error { return j.err } -func (j testPipeJob) Info() os.FileInfo { return j.fi } -func (j testPipeJob) Result() chan<- pipe.Result { return j.res } - -func testTreeWalker(ctx context.Context, out chan<- walk.TreeJob) { - for _, e := range treeJobs { - select { - case <-ctx.Done(): - return - case out <- walk.TreeJob{Path: e}: - } - } - - close(out) -} - -func testPipeWalker(ctx context.Context, out chan<- pipe.Job) { - for _, e := range pipeJobs { - select { - case <-ctx.Done(): - return - case out <- testPipeJob{path: e}: - } - } - - close(out) -} - -func 
TestArchivePipe(t *testing.T) { - ctx := context.TODO() - - treeCh := make(chan walk.TreeJob) - pipeCh := make(chan pipe.Job) - - go testTreeWalker(ctx, treeCh) - go testPipeWalker(ctx, pipeCh) - - p := archivePipe{Old: treeCh, New: pipeCh} - - ch := make(chan pipe.Job) - - go p.compare(ctx, ch) - - i := 0 - for job := range ch { - if job.Path() != resultJobs[i].path { - t.Fatalf("wrong job received: wanted %v, got %v", resultJobs[i], job) - } - - // switch j := job.(type) { - // case archivePipeJob: - // if j.action != resultJobs[i].action { - // t.Fatalf("wrong action for %v detected: wanted %q, got %q", job.Path(), resultJobs[i].action, j.action) - // } - // case testPipeJob: - // if resultJobs[i].action != "testPipeJob" { - // t.Fatalf("unexpected testPipeJob, expected %q: %v", resultJobs[i].action, j) - // } - // } - - i++ - } -} diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go deleted file mode 100644 index 1e8b9355f..000000000 --- a/internal/archiver/archiver_test.go +++ /dev/null @@ -1,258 +0,0 @@ -package archiver_test - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/restic/restic/internal/archiver" - "github.com/restic/restic/internal/crypto" - "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" - rtest "github.com/restic/restic/internal/test" - - "github.com/restic/restic/internal/errors" - - "github.com/restic/chunker" -) - -var testPol = chunker.Pol(0x3DA3358B4DC173) - -type Rdr interface { - io.ReadSeeker - io.ReaderAt -} - -func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) { - rd.Seek(0, 0) - ch := chunker.New(rd, testPol) - nonce := crypto.NewRandomNonce() - - for { - chunk, err := ch.Next(buf) - - if errors.Cause(err) == io.EOF { - break - } - - rtest.OK(b, err) - - rtest.Assert(b, uint(len(chunk.Data)) == chunk.Length, - "invalid length: got %d, expected %d", len(chunk.Data), chunk.Length) - - _ = key.Seal(buf2[:0], nonce, chunk.Data, nil) - } -} - -func BenchmarkChunkEncrypt(b *testing.B) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() - - data := rtest.Random(23, 10<<20) // 10MiB - rd := bytes.NewReader(data) - - buf := make([]byte, chunker.MaxSize) - buf2 := make([]byte, chunker.MaxSize) - - b.ResetTimer() - b.SetBytes(int64(len(data))) - - for i := 0; i < b.N; i++ { - benchmarkChunkEncrypt(b, buf, buf2, rd, repo.Key()) - } -} - -func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) { - ch := chunker.New(rd, testPol) - nonce := crypto.NewRandomNonce() - - for { - chunk, err := ch.Next(buf) - if errors.Cause(err) == io.EOF { - break - } - - _ = key.Seal(chunk.Data[:0], nonce, chunk.Data, nil) - } -} - -func BenchmarkChunkEncryptParallel(b *testing.B) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() - - data := rtest.Random(23, 10<<20) // 10MiB - - buf := make([]byte, chunker.MaxSize) - - b.ResetTimer() - b.SetBytes(int64(len(data))) - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - rd := bytes.NewReader(data) - benchmarkChunkEncryptP(pb, buf, rd, repo.Key()) - } - }) -} - -func archiveDirectory(b testing.TB) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() - - arch := archiver.New(repo) - - _, id, err := arch.Snapshot(context.TODO(), nil, []string{rtest.BenchArchiveDirectory}, nil, "localhost", nil, time.Now()) - rtest.OK(b, err) - - b.Logf("snapshot archived as 
%v", id) -} - -func TestArchiveDirectory(t *testing.T) { - if rtest.BenchArchiveDirectory == "" { - t.Skip("benchdir not set, skipping TestArchiveDirectory") - } - - archiveDirectory(t) -} - -func BenchmarkArchiveDirectory(b *testing.B) { - if rtest.BenchArchiveDirectory == "" { - b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory") - } - - for i := 0; i < b.N; i++ { - archiveDirectory(b) - } -} - -func countPacks(t testing.TB, repo restic.Repository, tpe restic.FileType) (n uint) { - err := repo.Backend().List(context.TODO(), tpe, func(restic.FileInfo) error { - n++ - return nil - }) - if err != nil { - t.Fatal(err) - } - - return n -} - -func archiveWithDedup(t testing.TB) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - if rtest.BenchArchiveDirectory == "" { - t.Skip("benchdir not set, skipping TestArchiverDedup") - } - - var cnt struct { - before, after, after2 struct { - packs, dataBlobs, treeBlobs uint - } - } - - // archive a few files - sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) - t.Logf("archived snapshot %v", sn.ID().Str()) - - // get archive stats - cnt.before.packs = countPacks(t, repo, restic.DataFile) - cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob) - cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob) - t.Logf("packs %v, data blobs %v, tree blobs %v", - cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) - - // archive the same files again, without parent snapshot - sn2 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) - t.Logf("archived snapshot %v", sn2.ID().Str()) - - // get archive stats again - cnt.after.packs = countPacks(t, repo, restic.DataFile) - cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob) - cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob) - t.Logf("packs %v, data blobs %v, tree blobs %v", - cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs) - - // if there are more data blobs, something is wrong - if cnt.after.dataBlobs > cnt.before.dataBlobs { - t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d", - cnt.before.dataBlobs, cnt.after.dataBlobs) - } - - // archive the same files again, with a parent snapshot - sn3 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, sn2.ID()) - t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) - - // get archive stats again - cnt.after2.packs = countPacks(t, repo, restic.DataFile) - cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob) - cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob) - t.Logf("packs %v, data blobs %v, tree blobs %v", - cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs) - - // if there are more data blobs, something is wrong - if cnt.after2.dataBlobs > cnt.before.dataBlobs { - t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d", - cnt.before.dataBlobs, cnt.after2.dataBlobs) - } -} - -func TestArchiveDedup(t *testing.T) { - archiveWithDedup(t) -} - -func TestArchiveEmptySnapshot(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - arch := archiver.New(repo) - - sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil, time.Now()) - if err == nil { - t.Errorf("expected error for empty snapshot, got nil") - } - - if !id.IsNull() { - t.Errorf("expected null ID for empty snapshot, got %v", id.Str()) - } - - if sn != nil { - 
t.Errorf("expected null snapshot for empty snapshot, got %v", sn) - } -} - -func TestArchiveNameCollision(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - dir, cleanup := rtest.TempDir(t) - defer cleanup() - - root := filepath.Join(dir, "root") - rtest.OK(t, os.MkdirAll(root, 0755)) - - rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "testfile"), []byte("testfile1"), 0644)) - rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "root", "testfile"), []byte("testfile2"), 0644)) - - defer fs.TestChdir(t, root)() - - arch := archiver.New(repo) - - sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"testfile", filepath.Join("..", "testfile")}, nil, "localhost", nil, time.Now()) - rtest.OK(t, err) - - t.Logf("snapshot archived as %v", id) - - tree, err := repo.LoadTree(context.TODO(), *sn.Tree) - rtest.OK(t, err) - - if len(tree.Nodes) != 2 { - t.Fatalf("tree has %d nodes, wanted 2: %v", len(tree.Nodes), tree.Nodes) - } -} diff --git a/internal/archiver/buffer_pool.go b/internal/archiver/buffer_pool.go deleted file mode 100644 index 32df5ab7b..000000000 --- a/internal/archiver/buffer_pool.go +++ /dev/null @@ -1,21 +0,0 @@ -package archiver - -import ( - "sync" - - "github.com/restic/chunker" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return make([]byte, chunker.MinSize) - }, -} - -func getBuf() []byte { - return bufPool.Get().([]byte) -} - -func freeBuf(data []byte) { - bufPool.Put(data) -} diff --git a/internal/pipe/doc.go b/internal/pipe/doc.go deleted file mode 100644 index ba5fc04ae..000000000 --- a/internal/pipe/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package pipe implements walking a directory in a deterministic order. -package pipe diff --git a/internal/pipe/pipe.go b/internal/pipe/pipe.go deleted file mode 100644 index 1cf7ff6bd..000000000 --- a/internal/pipe/pipe.go +++ /dev/null @@ -1,292 +0,0 @@ -package pipe - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sort" - - "github.com/restic/restic/internal/errors" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" -) - -type Result interface{} - -type Job interface { - Path() string - Fullpath() string - Error() error - Info() os.FileInfo - - Result() chan<- Result -} - -type Entry struct { - basedir string - path string - info os.FileInfo - error error - result chan<- Result - - // points to the old node if available, interface{} is used to prevent - // circular import - Node interface{} -} - -func (e Entry) Path() string { return e.path } -func (e Entry) Fullpath() string { return filepath.Join(e.basedir, e.path) } -func (e Entry) Error() error { return e.error } -func (e Entry) Info() os.FileInfo { return e.info } -func (e Entry) Result() chan<- Result { return e.result } - -type Dir struct { - basedir string - path string - error error - info os.FileInfo - - Entries [](<-chan Result) - result chan<- Result -} - -func (e Dir) Path() string { return e.path } -func (e Dir) Fullpath() string { return filepath.Join(e.basedir, e.path) } -func (e Dir) Error() error { return e.error } -func (e Dir) Info() os.FileInfo { return e.info } -func (e Dir) Result() chan<- Result { return e.result } - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// taken from filepath/path.go -func readDirNames(dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, errors.Wrap(err, "Open") - } - names, err := f.Readdirnames(-1) - _ = f.Close() - if err != nil { - return nil, errors.Wrap(err, "Readdirnames") - } - sort.Strings(names) - return names, nil -} - -// SelectFunc returns true for all items that should be included (files and -// dirs). If false is returned, files are ignored and dirs are not even walked. -type SelectFunc func(item string, fi os.FileInfo) bool - -func walk(ctx context.Context, basedir, dir string, selectFunc SelectFunc, jobs chan<- Job, res chan<- Result) (excluded bool) { - debug.Log("start on %q, basedir %q", dir, basedir) - - relpath, err := filepath.Rel(basedir, dir) - if err != nil { - panic(err) - } - - info, err := fs.Lstat(dir) - if err != nil { - err = errors.Wrap(err, "Lstat") - debug.Log("error for %v: %v, res %p", dir, err, res) - select { - case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}: - case <-ctx.Done(): - } - return - } - - if !selectFunc(dir, info) { - debug.Log("file %v excluded by filter, res %p", dir, res) - excluded = true - return - } - - if !info.IsDir() { - debug.Log("sending file job for %v, res %p", dir, res) - select { - case jobs <- Entry{info: info, basedir: basedir, path: relpath, result: res}: - case <-ctx.Done(): - } - return - } - - debug.RunHook("pipe.readdirnames", dir) - names, err := readDirNames(dir) - if err != nil { - debug.Log("Readdirnames(%v) returned error: %v, res %p", dir, err, res) - select { - case <-ctx.Done(): - case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}: - } - return - } - - // Insert breakpoint to allow testing behaviour with vanishing files - // between Readdir() and lstat() - debug.RunHook("pipe.walk1", relpath) - - entries := make([]<-chan Result, 0, len(names)) - - for _, name := range names { - subpath := filepath.Join(dir, name) - - fi, statErr := fs.Lstat(subpath) - if !selectFunc(subpath, fi) { - debug.Log("file %v excluded by filter", subpath) - continue - } - - ch := make(chan Result, 1) - entries = append(entries, ch) - - if statErr != nil { - statErr = errors.Wrap(statErr, "Lstat") - debug.Log("sending file job for %v, err %v, res %p", subpath, err, res) - select { - case jobs <- Entry{info: fi, error: statErr, basedir: basedir, path: filepath.Join(relpath, name), result: ch}: - case <-ctx.Done(): - return - } - continue - } - - // Insert breakpoint to allow testing behaviour with vanishing files - // between walk and open - debug.RunHook("pipe.walk2", filepath.Join(relpath, name)) - - walk(ctx, basedir, subpath, selectFunc, jobs, ch) - } - - debug.Log("sending dirjob for %q, basedir %q, res %p", dir, basedir, res) - select { - case jobs <- Dir{basedir: basedir, path: relpath, info: info, Entries: entries, result: res}: - case <-ctx.Done(): - } - - return -} - -// cleanupPath is used to clean a path. For a normal path, a slice with just -// the path is returned. For special cases such as "." and "/" the list of -// names within those paths is returned. 
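Every send in walk above sits inside a select against ctx.Done(), so cancelling the pipeline never strands the walker blocked on an unbuffered channel send. The idiom in isolation (a sketch with a string job type for brevity):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// sendOrCancel wraps a channel send in a select against ctx.Done(), the
// pattern used for every jobs send in the walker: the send either
// completes or is abandoned when the pipeline is cancelled.
func sendOrCancel(ctx context.Context, jobs chan<- string, job string) bool {
	select {
	case jobs <- job:
		return true
	case <-ctx.Done():
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	jobs := make(chan string) // unbuffered: sends block until received

	go func() {
		fmt.Println("received:", <-jobs)
		cancel() // no more consumers after the first job
	}()

	fmt.Println("first send ok:", sendOrCancel(ctx, jobs, "a"))
	time.Sleep(10 * time.Millisecond) // let the cancellation land
	fmt.Println("second send ok:", sendOrCancel(ctx, jobs, "b"))
}
```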
-func cleanupPath(path string) ([]string, error) { - path = filepath.Clean(path) - if filepath.Dir(path) != path { - return []string{path}, nil - } - - paths, err := readDirNames(path) - if err != nil { - return nil, err - } - - for i, p := range paths { - paths[i] = filepath.Join(path, p) - } - - return paths, nil -} - -// Walk sends a Job for each file and directory it finds below the paths. When -// the channel done is closed, processing stops. -func Walk(ctx context.Context, walkPaths []string, selectFunc SelectFunc, jobs chan<- Job, res chan<- Result) { - var paths []string - - for _, p := range walkPaths { - ps, err := cleanupPath(p) - if err != nil { - fmt.Fprintf(os.Stderr, "Readdirnames(%v): %v, skipping\n", p, err) - debug.Log("Readdirnames(%v) returned error: %v, skipping", p, err) - continue - } - - paths = append(paths, ps...) - } - - debug.Log("start on %v", paths) - defer func() { - debug.Log("output channel closed") - close(jobs) - }() - - entries := make([]<-chan Result, 0, len(paths)) - for _, path := range paths { - debug.Log("start walker for %v", path) - ch := make(chan Result, 1) - excluded := walk(ctx, filepath.Dir(path), path, selectFunc, jobs, ch) - - if excluded { - debug.Log("walker for %v done, it was excluded by the filter", path) - continue - } - - entries = append(entries, ch) - debug.Log("walker for %v done", path) - } - - debug.Log("sending root node, res %p", res) - select { - case <-ctx.Done(): - return - case jobs <- Dir{Entries: entries, result: res}: - } - - debug.Log("walker done") -} - -// Split feeds all elements read from inChan to dirChan and entChan. -func Split(inChan <-chan Job, dirChan chan<- Dir, entChan chan<- Entry) { - debug.Log("start") - defer debug.Log("done") - - inCh := inChan - dirCh := dirChan - entCh := entChan - - var ( - dir Dir - ent Entry - ) - - // deactivate sending until we received at least one job - dirCh = nil - entCh = nil - for { - select { - case job, ok := <-inCh: - if !ok { - // channel is closed - return - } - - if job == nil { - panic("nil job received") - } - - // disable receiving until the current job has been sent - inCh = nil - - switch j := job.(type) { - case Dir: - dir = j - dirCh = dirChan - case Entry: - ent = j - entCh = entChan - default: - panic(fmt.Sprintf("unknown job type %v", j)) - } - case dirCh <- dir: - // disable sending, re-enable receiving - dirCh = nil - inCh = inChan - case entCh <- ent: - // disable sending, re-enable receiving - entCh = nil - inCh = inChan - } - } -} diff --git a/internal/pipe/pipe_test.go b/internal/pipe/pipe_test.go deleted file mode 100644 index 1612987e7..000000000 --- a/internal/pipe/pipe_test.go +++ /dev/null @@ -1,600 +0,0 @@ -package pipe_test - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/pipe" - rtest "github.com/restic/restic/internal/test" -) - -type stats struct { - dirs, files int -} - -func acceptAll(string, os.FileInfo) bool { - return true -} - -func statPath(path string) (stats, error) { - var s stats - - // count files and directories with filepath.Walk() - err := filepath.Walk(rtest.TestWalkerPath, func(p string, fi os.FileInfo, err error) error { - if fi == nil { - return err - } - - if fi.IsDir() { - s.dirs++ - } else { - s.files++ - } - - return err - }) - - return s, err -} - -const maxWorkers = 100 - -func TestPipelineWalkerWithSplit(t *testing.T) { - if rtest.TestWalkerPath == "" { - t.Skipf("walkerpath not 
set, skipping TestPipelineWalker") - } - - var err error - if !filepath.IsAbs(rtest.TestWalkerPath) { - rtest.TestWalkerPath, err = filepath.Abs(rtest.TestWalkerPath) - rtest.OK(t, err) - } - - before, err := statPath(rtest.TestWalkerPath) - rtest.OK(t, err) - - t.Logf("walking path %s with %d dirs, %d files", rtest.TestWalkerPath, - before.dirs, before.files) - - // account for top level dir - before.dirs++ - - after := stats{} - m := sync.Mutex{} - - worker := func(wg *sync.WaitGroup, done <-chan struct{}, entCh <-chan pipe.Entry, dirCh <-chan pipe.Dir) { - defer wg.Done() - for { - select { - case e, ok := <-entCh: - if !ok { - // channel is closed - return - } - - m.Lock() - after.files++ - m.Unlock() - - e.Result() <- true - - case dir, ok := <-dirCh: - if !ok { - // channel is closed - return - } - - // wait for all content - for _, ch := range dir.Entries { - <-ch - } - - m.Lock() - after.dirs++ - m.Unlock() - - dir.Result() <- true - case <-done: - // pipeline was cancelled - return - } - } - } - - var wg sync.WaitGroup - done := make(chan struct{}) - entCh := make(chan pipe.Entry) - dirCh := make(chan pipe.Dir) - - for i := 0; i < maxWorkers; i++ { - wg.Add(1) - go worker(&wg, done, entCh, dirCh) - } - - jobs := make(chan pipe.Job, 200) - wg.Add(1) - go func() { - pipe.Split(jobs, dirCh, entCh) - close(entCh) - close(dirCh) - wg.Done() - }() - - resCh := make(chan pipe.Result, 1) - pipe.Walk(context.TODO(), []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for top-level blob - <-resCh - - t.Logf("walked path %s with %d dirs, %d files", rtest.TestWalkerPath, - after.dirs, after.files) - - rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) -} - -func TestPipelineWalker(t *testing.T) { - if rtest.TestWalkerPath == "" { - t.Skipf("walkerpath not set, skipping TestPipelineWalker") - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - var err error - if !filepath.IsAbs(rtest.TestWalkerPath) { - rtest.TestWalkerPath, err = filepath.Abs(rtest.TestWalkerPath) - rtest.OK(t, err) - } - - before, err := statPath(rtest.TestWalkerPath) - rtest.OK(t, err) - - t.Logf("walking path %s with %d dirs, %d files", rtest.TestWalkerPath, - before.dirs, before.files) - - // account for top level dir - before.dirs++ - - after := stats{} - m := sync.Mutex{} - - worker := func(ctx context.Context, wg *sync.WaitGroup, jobs <-chan pipe.Job) { - defer wg.Done() - for { - select { - case job, ok := <-jobs: - if !ok { - // channel is closed - return - } - rtest.Assert(t, job != nil, "job is nil") - - switch j := job.(type) { - case pipe.Dir: - // wait for all content - for _, ch := range j.Entries { - <-ch - } - - m.Lock() - after.dirs++ - m.Unlock() - - j.Result() <- true - case pipe.Entry: - m.Lock() - after.files++ - m.Unlock() - - j.Result() <- true - } - - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - var wg sync.WaitGroup - jobs := make(chan pipe.Job) - - for i := 0; i < maxWorkers; i++ { - wg.Add(1) - go worker(ctx, &wg, jobs) - } - - resCh := make(chan pipe.Result, 1) - pipe.Walk(ctx, []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for top-level blob - <-resCh - - t.Logf("walked path %s with %d dirs, %d files", rtest.TestWalkerPath, - after.dirs, after.files) - - rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) -} - -func 
createFile(filename, data string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - - defer f.Close() - - _, err = f.Write([]byte(data)) - if err != nil { - return err - } - - return nil -} - -func TestPipeWalkerError(t *testing.T) { - dir, err := ioutil.TempDir("", "restic-test-") - rtest.OK(t, err) - - base := filepath.Base(dir) - - var testjobs = []struct { - path []string - err bool - }{ - {[]string{base, "a", "file_a"}, false}, - {[]string{base, "a"}, false}, - {[]string{base, "b"}, true}, - {[]string{base, "c", "file_c"}, false}, - {[]string{base, "c"}, false}, - {[]string{base}, false}, - {[]string{}, false}, - } - - rtest.OK(t, os.Mkdir(filepath.Join(dir, "a"), 0755)) - rtest.OK(t, os.Mkdir(filepath.Join(dir, "b"), 0755)) - rtest.OK(t, os.Mkdir(filepath.Join(dir, "c"), 0755)) - - rtest.OK(t, createFile(filepath.Join(dir, "a", "file_a"), "file a")) - rtest.OK(t, createFile(filepath.Join(dir, "b", "file_b"), "file b")) - rtest.OK(t, createFile(filepath.Join(dir, "c", "file_c"), "file c")) - - ranHook := false - testdir := filepath.Join(dir, "b") - - // install hook that removes the dir right before readdirnames() - debug.Hook("pipe.readdirnames", func(context interface{}) { - path := context.(string) - - if path != testdir { - return - } - - t.Logf("in hook, removing test file %v", testdir) - ranHook = true - - rtest.OK(t, os.RemoveAll(testdir)) - }) - - ctx, cancel := context.WithCancel(context.TODO()) - - ch := make(chan pipe.Job) - resCh := make(chan pipe.Result, 1) - - go pipe.Walk(ctx, []string{dir}, acceptAll, ch, resCh) - - i := 0 - for job := range ch { - if i == len(testjobs) { - t.Errorf("too many jobs received") - break - } - - p := filepath.Join(testjobs[i].path...) - if p != job.Path() { - t.Errorf("job %d has wrong path: expected %q, got %q", i, p, job.Path()) - } - - if testjobs[i].err { - if job.Error() == nil { - t.Errorf("job %d expected error but got nil", i) - } - } else { - if job.Error() != nil { - t.Errorf("job %d expected no error but got %v", i, job.Error()) - } - } - - i++ - } - - if i != len(testjobs) { - t.Errorf("expected %d jobs, got %d", len(testjobs), i) - } - - cancel() - - rtest.Assert(t, ranHook, "hook did not run") - rtest.OK(t, os.RemoveAll(dir)) -} - -func BenchmarkPipelineWalker(b *testing.B) { - if rtest.TestWalkerPath == "" { - b.Skipf("walkerpath not set, skipping BenchPipelineWalker") - } - - var max time.Duration - m := sync.Mutex{} - - fileWorker := func(ctx context.Context, wg *sync.WaitGroup, ch <-chan pipe.Entry) { - defer wg.Done() - for { - select { - case e, ok := <-ch: - if !ok { - // channel is closed - return - } - - // simulate backup - //time.Sleep(10 * time.Millisecond) - - e.Result() <- true - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - dirWorker := func(ctx context.Context, wg *sync.WaitGroup, ch <-chan pipe.Dir) { - defer wg.Done() - for { - select { - case dir, ok := <-ch: - if !ok { - // channel is closed - return - } - - start := time.Now() - - // wait for all content - for _, ch := range dir.Entries { - <-ch - } - - d := time.Since(start) - m.Lock() - if d > max { - max = d - } - m.Unlock() - - dir.Result() <- true - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - for i := 0; i < b.N; i++ { - max = 0 - entCh := make(chan pipe.Entry, 200) - dirCh := make(chan pipe.Dir, 200) - - var wg sync.WaitGroup - b.Logf("starting %d workers", maxWorkers) - for i := 0; i < maxWorkers; 
i++ { - wg.Add(2) - go dirWorker(ctx, &wg, dirCh) - go fileWorker(ctx, &wg, entCh) - } - - jobs := make(chan pipe.Job, 200) - wg.Add(1) - go func() { - pipe.Split(jobs, dirCh, entCh) - close(entCh) - close(dirCh) - wg.Done() - }() - - resCh := make(chan pipe.Result, 1) - pipe.Walk(ctx, []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for final result - <-resCh - - b.Logf("max duration for a dir: %v", max) - } -} - -func TestPipelineWalkerMultiple(t *testing.T) { - if rtest.TestWalkerPath == "" { - t.Skipf("walkerpath not set, skipping TestPipelineWalker") - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - paths, err := filepath.Glob(filepath.Join(rtest.TestWalkerPath, "*")) - rtest.OK(t, err) - - before, err := statPath(rtest.TestWalkerPath) - rtest.OK(t, err) - - t.Logf("walking paths %v with %d dirs, %d files", paths, - before.dirs, before.files) - - after := stats{} - m := sync.Mutex{} - - worker := func(ctx context.Context, wg *sync.WaitGroup, jobs <-chan pipe.Job) { - defer wg.Done() - for { - select { - case job, ok := <-jobs: - if !ok { - // channel is closed - return - } - rtest.Assert(t, job != nil, "job is nil") - - switch j := job.(type) { - case pipe.Dir: - // wait for all content - for _, ch := range j.Entries { - <-ch - } - - m.Lock() - after.dirs++ - m.Unlock() - - j.Result() <- true - case pipe.Entry: - m.Lock() - after.files++ - m.Unlock() - - j.Result() <- true - } - - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - var wg sync.WaitGroup - jobs := make(chan pipe.Job) - - for i := 0; i < maxWorkers; i++ { - wg.Add(1) - go worker(ctx, &wg, jobs) - } - - resCh := make(chan pipe.Result, 1) - pipe.Walk(ctx, paths, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for top-level blob - <-resCh - - t.Logf("walked %d paths with %d dirs, %d files", len(paths), after.dirs, after.files) - - rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) -} - -func dirsInPath(path string) int { - if path == "/" || path == "." 
|| path == "" { - return 0 - } - - n := 0 - for dir := path; dir != "/" && dir != "."; dir = filepath.Dir(dir) { - n++ - } - - return n -} - -func TestPipeWalkerRoot(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skipf("not running TestPipeWalkerRoot on %s", runtime.GOOS) - return - } - - cwd, err := os.Getwd() - rtest.OK(t, err) - - testPaths := []string{ - string(filepath.Separator), - ".", - cwd, - } - - for _, path := range testPaths { - testPipeWalkerRootWithPath(path, t) - } -} - -func testPipeWalkerRootWithPath(path string, t *testing.T) { - pattern := filepath.Join(path, "*") - rootPaths, err := filepath.Glob(pattern) - rtest.OK(t, err) - - for i, p := range rootPaths { - rootPaths[i], err = filepath.Rel(path, p) - rtest.OK(t, err) - } - - t.Logf("paths in %v (pattern %q) expanded to %v items", path, pattern, len(rootPaths)) - - jobCh := make(chan pipe.Job) - var jobs []pipe.Job - - worker := func(wg *sync.WaitGroup) { - defer wg.Done() - for job := range jobCh { - jobs = append(jobs, job) - } - } - - var wg sync.WaitGroup - wg.Add(1) - go worker(&wg) - - filter := func(p string, fi os.FileInfo) bool { - p, err := filepath.Rel(path, p) - rtest.OK(t, err) - return dirsInPath(p) <= 1 - } - - resCh := make(chan pipe.Result, 1) - pipe.Walk(context.TODO(), []string{path}, filter, jobCh, resCh) - - wg.Wait() - - t.Logf("received %d jobs", len(jobs)) - - for i, job := range jobs[:len(jobs)-1] { - path := job.Path() - if path == "." || path == ".." || path == string(filepath.Separator) { - t.Errorf("job %v has invalid path %q", i, path) - } - } - - lastPath := jobs[len(jobs)-1].Path() - if lastPath != "" { - t.Errorf("last job has non-empty path %q", lastPath) - } - - if len(jobs) < len(rootPaths) { - t.Errorf("want at least %v jobs, got %v for path %v\n", len(rootPaths), len(jobs), path) - } -} diff --git a/internal/walk/testdata/walktree-test-repo.tar.gz b/internal/walk/testdata/walktree-test-repo.tar.gz deleted file mode 100644 index 5f3d19c10..000000000 Binary files a/internal/walk/testdata/walktree-test-repo.tar.gz and /dev/null differ diff --git a/internal/walk/walk.go b/internal/walk/walk.go deleted file mode 100644 index 41618830a..000000000 --- a/internal/walk/walk.go +++ /dev/null @@ -1,197 +0,0 @@ -package walk - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/restic" -) - -// TreeJob is a job sent from the tree walker. -type TreeJob struct { - Path string - Error error - - Node *restic.Node - Tree *restic.Tree -} - -// TreeWalker traverses a tree in the repository depth-first and sends a job -// for each item (file or dir) that it encounters. -type TreeWalker struct { - ch chan<- loadTreeJob - out chan<- TreeJob -} - -// NewTreeWalker uses ch to load trees from the repository and sends jobs to -// out. -func NewTreeWalker(ch chan<- loadTreeJob, out chan<- TreeJob) *TreeWalker { - return &TreeWalker{ch: ch, out: out} -} - -// Walk starts walking the tree given by id. When the context is cancelled, -// processing stops. 
-func (tw *TreeWalker) Walk(ctx context.Context, path string, id restic.ID) { - debug.Log("starting on tree %v for %v", id, path) - defer debug.Log("done walking tree %v for %v", id, path) - - resCh := make(chan loadTreeResult, 1) - tw.ch <- loadTreeJob{ - id: id, - res: resCh, - } - - res := <-resCh - if res.err != nil { - select { - case tw.out <- TreeJob{Path: path, Error: res.err}: - case <-ctx.Done(): - return - } - return - } - - tw.walk(ctx, path, res.tree) - - select { - case tw.out <- TreeJob{Path: path, Tree: res.tree}: - case <-ctx.Done(): - return - } -} - -func (tw *TreeWalker) walk(ctx context.Context, path string, tree *restic.Tree) { - debug.Log("start on %q", path) - defer debug.Log("done for %q", path) - - debug.Log("tree %#v", tree) - - // load all subtrees in parallel - results := make([]<-chan loadTreeResult, len(tree.Nodes)) - for i, node := range tree.Nodes { - if node.Type == "dir" { - resCh := make(chan loadTreeResult, 1) - tw.ch <- loadTreeJob{ - id: *node.Subtree, - res: resCh, - } - - results[i] = resCh - } - } - - for i, node := range tree.Nodes { - p := filepath.Join(path, node.Name) - var job TreeJob - - if node.Type == "dir" { - if results[i] == nil { - panic("result chan should not be nil") - } - - res := <-results[i] - if res.err == nil { - tw.walk(ctx, p, res.tree) - } else { - fmt.Fprintf(os.Stderr, "error loading tree: %v\n", res.err) - } - - job = TreeJob{Path: p, Tree: res.tree, Error: res.err} - } else { - job = TreeJob{Path: p, Node: node} - } - - select { - case tw.out <- job: - case <-ctx.Done(): - return - } - } -} - -type loadTreeResult struct { - tree *restic.Tree - err error -} - -type loadTreeJob struct { - id restic.ID - res chan<- loadTreeResult -} - -type treeLoader func(restic.ID) (*restic.Tree, error) - -func loadTreeWorker(ctx context.Context, wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader) { - debug.Log("start") - defer debug.Log("exit") - defer wg.Done() - - for { - select { - case <-ctx.Done(): - debug.Log("context cancelled") - return - case job, ok := <-in: - if !ok { - debug.Log("input channel closed, exiting") - return - } - - debug.Log("received job to load tree %v", job.id) - tree, err := load(job.id) - - debug.Log("tree %v loaded, error %v", job.id, err) - - select { - case job.res <- loadTreeResult{tree, err}: - debug.Log("job result sent") - case <-ctx.Done(): - debug.Log("context cancelled before result could be sent") - return - } - } - } -} - -// TreeLoader loads tree objects. -type TreeLoader interface { - LoadTree(context.Context, restic.ID) (*restic.Tree, error) -} - -const loadTreeWorkers = 10 - -// Tree walks the tree specified by id recursively and sends a job for each -// file and directory it finds. When the context is cancelled, processing -// stops. 
-func Tree(ctx context.Context, repo TreeLoader, id restic.ID, jobCh chan<- TreeJob) { - debug.Log("start on %v, start workers", id) - - load := func(id restic.ID) (*restic.Tree, error) { - tree, err := repo.LoadTree(ctx, id) - if err != nil { - return nil, err - } - return tree, nil - } - - ch := make(chan loadTreeJob) - - var wg sync.WaitGroup - for i := 0; i < loadTreeWorkers; i++ { - wg.Add(1) - go loadTreeWorker(ctx, &wg, ch, load) - } - - tw := NewTreeWalker(ch, jobCh) - tw.Walk(ctx, "", id) - close(jobCh) - - close(ch) - wg.Wait() - - debug.Log("done") -} diff --git a/internal/walk/walk_test.go b/internal/walk/walk_test.go deleted file mode 100644 index b67ae9151..000000000 --- a/internal/walk/walk_test.go +++ /dev/null @@ -1,1394 +0,0 @@ -package walk_test - -import ( - "context" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/restic/restic/internal/archiver" - "github.com/restic/restic/internal/pipe" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" - rtest "github.com/restic/restic/internal/test" - "github.com/restic/restic/internal/walk" -) - -func TestWalkTree(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - dirs, err := filepath.Glob(rtest.TestWalkerPath) - rtest.OK(t, err) - - // archive a few files - arch := archiver.New(repo) - sn, _, err := arch.Snapshot(context.TODO(), nil, dirs, nil, "localhost", nil, time.Now()) - rtest.OK(t, err) - - // flush repo, write all packs - rtest.OK(t, repo.Flush(context.Background())) - - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(context.TODO(), repo, *sn.Tree, treeJobs) - - // start filesystem walker - fsJobs := make(chan pipe.Job) - resCh := make(chan pipe.Result, 1) - - f := func(string, os.FileInfo) bool { - return true - } - go pipe.Walk(context.TODO(), dirs, f, fsJobs, resCh) - - for { - // receive fs job - fsJob, fsChOpen := <-fsJobs - rtest.Assert(t, !fsChOpen || fsJob != nil, - "received nil job from filesystem: %v %v", fsJob, fsChOpen) - if fsJob != nil { - rtest.OK(t, fsJob.Error()) - } - - var path string - fsEntries := 1 - switch j := fsJob.(type) { - case pipe.Dir: - path = j.Path() - fsEntries = len(j.Entries) - case pipe.Entry: - path = j.Path() - } - - // receive tree job - treeJob, treeChOpen := <-treeJobs - treeEntries := 1 - - rtest.OK(t, treeJob.Error) - - if treeJob.Tree != nil { - treeEntries = len(treeJob.Tree.Nodes) - } - - rtest.Assert(t, fsChOpen == treeChOpen, - "one channel closed too early: fsChOpen %v, treeChOpen %v", - fsChOpen, treeChOpen) - - if !fsChOpen || !treeChOpen { - break - } - - rtest.Assert(t, filepath.Base(path) == filepath.Base(treeJob.Path), - "paths do not match: %q != %q", filepath.Base(path), filepath.Base(treeJob.Path)) - - rtest.Assert(t, fsEntries == treeEntries, - "wrong number of entries: %v != %v", fsEntries, treeEntries) - } -} - -type delayRepo struct { - repo restic.Repository - delay time.Duration -} - -func (d delayRepo) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) { - time.Sleep(d.delay) - return d.repo.LoadTree(ctx, id) -} - -var repoFixture = filepath.Join("testdata", "walktree-test-repo.tar.gz") - -var walktreeTestItems = []string{ - "testdata/0/0/0/0", - "testdata/0/0/0/1", - "testdata/0/0/0/10", - "testdata/0/0/0/100", - "testdata/0/0/0/101", - "testdata/0/0/0/102", - "testdata/0/0/0/103", - "testdata/0/0/0/104", - "testdata/0/0/0/105", - "testdata/0/0/0/106", - "testdata/0/0/0/107", - "testdata/0/0/0/108", - 
"testdata/0/0/0/109", - "testdata/0/0/0/11", - "testdata/0/0/0/110", - "testdata/0/0/0/111", - "testdata/0/0/0/112", - "testdata/0/0/0/113", - "testdata/0/0/0/114", - "testdata/0/0/0/115", - "testdata/0/0/0/116", - "testdata/0/0/0/117", - "testdata/0/0/0/118", - "testdata/0/0/0/119", - "testdata/0/0/0/12", - "testdata/0/0/0/120", - "testdata/0/0/0/121", - "testdata/0/0/0/122", - "testdata/0/0/0/123", - "testdata/0/0/0/124", - "testdata/0/0/0/125", - "testdata/0/0/0/126", - "testdata/0/0/0/127", - "testdata/0/0/0/13", - "testdata/0/0/0/14", - "testdata/0/0/0/15", - "testdata/0/0/0/16", - "testdata/0/0/0/17", - "testdata/0/0/0/18", - "testdata/0/0/0/19", - "testdata/0/0/0/2", - "testdata/0/0/0/20", - "testdata/0/0/0/21", - "testdata/0/0/0/22", - "testdata/0/0/0/23", - "testdata/0/0/0/24", - "testdata/0/0/0/25", - "testdata/0/0/0/26", - "testdata/0/0/0/27", - "testdata/0/0/0/28", - "testdata/0/0/0/29", - "testdata/0/0/0/3", - "testdata/0/0/0/30", - "testdata/0/0/0/31", - "testdata/0/0/0/32", - "testdata/0/0/0/33", - "testdata/0/0/0/34", - "testdata/0/0/0/35", - "testdata/0/0/0/36", - "testdata/0/0/0/37", - "testdata/0/0/0/38", - "testdata/0/0/0/39", - "testdata/0/0/0/4", - "testdata/0/0/0/40", - "testdata/0/0/0/41", - "testdata/0/0/0/42", - "testdata/0/0/0/43", - "testdata/0/0/0/44", - "testdata/0/0/0/45", - "testdata/0/0/0/46", - "testdata/0/0/0/47", - "testdata/0/0/0/48", - "testdata/0/0/0/49", - "testdata/0/0/0/5", - "testdata/0/0/0/50", - "testdata/0/0/0/51", - "testdata/0/0/0/52", - "testdata/0/0/0/53", - "testdata/0/0/0/54", - "testdata/0/0/0/55", - "testdata/0/0/0/56", - "testdata/0/0/0/57", - "testdata/0/0/0/58", - "testdata/0/0/0/59", - "testdata/0/0/0/6", - "testdata/0/0/0/60", - "testdata/0/0/0/61", - "testdata/0/0/0/62", - "testdata/0/0/0/63", - "testdata/0/0/0/64", - "testdata/0/0/0/65", - "testdata/0/0/0/66", - "testdata/0/0/0/67", - "testdata/0/0/0/68", - "testdata/0/0/0/69", - "testdata/0/0/0/7", - "testdata/0/0/0/70", - "testdata/0/0/0/71", - "testdata/0/0/0/72", - "testdata/0/0/0/73", - "testdata/0/0/0/74", - "testdata/0/0/0/75", - "testdata/0/0/0/76", - "testdata/0/0/0/77", - "testdata/0/0/0/78", - "testdata/0/0/0/79", - "testdata/0/0/0/8", - "testdata/0/0/0/80", - "testdata/0/0/0/81", - "testdata/0/0/0/82", - "testdata/0/0/0/83", - "testdata/0/0/0/84", - "testdata/0/0/0/85", - "testdata/0/0/0/86", - "testdata/0/0/0/87", - "testdata/0/0/0/88", - "testdata/0/0/0/89", - "testdata/0/0/0/9", - "testdata/0/0/0/90", - "testdata/0/0/0/91", - "testdata/0/0/0/92", - "testdata/0/0/0/93", - "testdata/0/0/0/94", - "testdata/0/0/0/95", - "testdata/0/0/0/96", - "testdata/0/0/0/97", - "testdata/0/0/0/98", - "testdata/0/0/0/99", - "testdata/0/0/0", - "testdata/0/0/1/0", - "testdata/0/0/1/1", - "testdata/0/0/1/10", - "testdata/0/0/1/100", - "testdata/0/0/1/101", - "testdata/0/0/1/102", - "testdata/0/0/1/103", - "testdata/0/0/1/104", - "testdata/0/0/1/105", - "testdata/0/0/1/106", - "testdata/0/0/1/107", - "testdata/0/0/1/108", - "testdata/0/0/1/109", - "testdata/0/0/1/11", - "testdata/0/0/1/110", - "testdata/0/0/1/111", - "testdata/0/0/1/112", - "testdata/0/0/1/113", - "testdata/0/0/1/114", - "testdata/0/0/1/115", - "testdata/0/0/1/116", - "testdata/0/0/1/117", - "testdata/0/0/1/118", - "testdata/0/0/1/119", - "testdata/0/0/1/12", - "testdata/0/0/1/120", - "testdata/0/0/1/121", - "testdata/0/0/1/122", - "testdata/0/0/1/123", - "testdata/0/0/1/124", - "testdata/0/0/1/125", - "testdata/0/0/1/126", - "testdata/0/0/1/127", - "testdata/0/0/1/13", - "testdata/0/0/1/14", - "testdata/0/0/1/15", - 
"testdata/0/0/1/16", - "testdata/0/0/1/17", - "testdata/0/0/1/18", - "testdata/0/0/1/19", - "testdata/0/0/1/2", - "testdata/0/0/1/20", - "testdata/0/0/1/21", - "testdata/0/0/1/22", - "testdata/0/0/1/23", - "testdata/0/0/1/24", - "testdata/0/0/1/25", - "testdata/0/0/1/26", - "testdata/0/0/1/27", - "testdata/0/0/1/28", - "testdata/0/0/1/29", - "testdata/0/0/1/3", - "testdata/0/0/1/30", - "testdata/0/0/1/31", - "testdata/0/0/1/32", - "testdata/0/0/1/33", - "testdata/0/0/1/34", - "testdata/0/0/1/35", - "testdata/0/0/1/36", - "testdata/0/0/1/37", - "testdata/0/0/1/38", - "testdata/0/0/1/39", - "testdata/0/0/1/4", - "testdata/0/0/1/40", - "testdata/0/0/1/41", - "testdata/0/0/1/42", - "testdata/0/0/1/43", - "testdata/0/0/1/44", - "testdata/0/0/1/45", - "testdata/0/0/1/46", - "testdata/0/0/1/47", - "testdata/0/0/1/48", - "testdata/0/0/1/49", - "testdata/0/0/1/5", - "testdata/0/0/1/50", - "testdata/0/0/1/51", - "testdata/0/0/1/52", - "testdata/0/0/1/53", - "testdata/0/0/1/54", - "testdata/0/0/1/55", - "testdata/0/0/1/56", - "testdata/0/0/1/57", - "testdata/0/0/1/58", - "testdata/0/0/1/59", - "testdata/0/0/1/6", - "testdata/0/0/1/60", - "testdata/0/0/1/61", - "testdata/0/0/1/62", - "testdata/0/0/1/63", - "testdata/0/0/1/64", - "testdata/0/0/1/65", - "testdata/0/0/1/66", - "testdata/0/0/1/67", - "testdata/0/0/1/68", - "testdata/0/0/1/69", - "testdata/0/0/1/7", - "testdata/0/0/1/70", - "testdata/0/0/1/71", - "testdata/0/0/1/72", - "testdata/0/0/1/73", - "testdata/0/0/1/74", - "testdata/0/0/1/75", - "testdata/0/0/1/76", - "testdata/0/0/1/77", - "testdata/0/0/1/78", - "testdata/0/0/1/79", - "testdata/0/0/1/8", - "testdata/0/0/1/80", - "testdata/0/0/1/81", - "testdata/0/0/1/82", - "testdata/0/0/1/83", - "testdata/0/0/1/84", - "testdata/0/0/1/85", - "testdata/0/0/1/86", - "testdata/0/0/1/87", - "testdata/0/0/1/88", - "testdata/0/0/1/89", - "testdata/0/0/1/9", - "testdata/0/0/1/90", - "testdata/0/0/1/91", - "testdata/0/0/1/92", - "testdata/0/0/1/93", - "testdata/0/0/1/94", - "testdata/0/0/1/95", - "testdata/0/0/1/96", - "testdata/0/0/1/97", - "testdata/0/0/1/98", - "testdata/0/0/1/99", - "testdata/0/0/1", - "testdata/0/0/2/0", - "testdata/0/0/2/1", - "testdata/0/0/2/10", - "testdata/0/0/2/100", - "testdata/0/0/2/101", - "testdata/0/0/2/102", - "testdata/0/0/2/103", - "testdata/0/0/2/104", - "testdata/0/0/2/105", - "testdata/0/0/2/106", - "testdata/0/0/2/107", - "testdata/0/0/2/108", - "testdata/0/0/2/109", - "testdata/0/0/2/11", - "testdata/0/0/2/110", - "testdata/0/0/2/111", - "testdata/0/0/2/112", - "testdata/0/0/2/113", - "testdata/0/0/2/114", - "testdata/0/0/2/115", - "testdata/0/0/2/116", - "testdata/0/0/2/117", - "testdata/0/0/2/118", - "testdata/0/0/2/119", - "testdata/0/0/2/12", - "testdata/0/0/2/120", - "testdata/0/0/2/121", - "testdata/0/0/2/122", - "testdata/0/0/2/123", - "testdata/0/0/2/124", - "testdata/0/0/2/125", - "testdata/0/0/2/126", - "testdata/0/0/2/127", - "testdata/0/0/2/13", - "testdata/0/0/2/14", - "testdata/0/0/2/15", - "testdata/0/0/2/16", - "testdata/0/0/2/17", - "testdata/0/0/2/18", - "testdata/0/0/2/19", - "testdata/0/0/2/2", - "testdata/0/0/2/20", - "testdata/0/0/2/21", - "testdata/0/0/2/22", - "testdata/0/0/2/23", - "testdata/0/0/2/24", - "testdata/0/0/2/25", - "testdata/0/0/2/26", - "testdata/0/0/2/27", - "testdata/0/0/2/28", - "testdata/0/0/2/29", - "testdata/0/0/2/3", - "testdata/0/0/2/30", - "testdata/0/0/2/31", - "testdata/0/0/2/32", - "testdata/0/0/2/33", - "testdata/0/0/2/34", - "testdata/0/0/2/35", - "testdata/0/0/2/36", - "testdata/0/0/2/37", - "testdata/0/0/2/38", - 
"testdata/0/0/2/39", - "testdata/0/0/2/4", - "testdata/0/0/2/40", - "testdata/0/0/2/41", - "testdata/0/0/2/42", - "testdata/0/0/2/43", - "testdata/0/0/2/44", - "testdata/0/0/2/45", - "testdata/0/0/2/46", - "testdata/0/0/2/47", - "testdata/0/0/2/48", - "testdata/0/0/2/49", - "testdata/0/0/2/5", - "testdata/0/0/2/50", - "testdata/0/0/2/51", - "testdata/0/0/2/52", - "testdata/0/0/2/53", - "testdata/0/0/2/54", - "testdata/0/0/2/55", - "testdata/0/0/2/56", - "testdata/0/0/2/57", - "testdata/0/0/2/58", - "testdata/0/0/2/59", - "testdata/0/0/2/6", - "testdata/0/0/2/60", - "testdata/0/0/2/61", - "testdata/0/0/2/62", - "testdata/0/0/2/63", - "testdata/0/0/2/64", - "testdata/0/0/2/65", - "testdata/0/0/2/66", - "testdata/0/0/2/67", - "testdata/0/0/2/68", - "testdata/0/0/2/69", - "testdata/0/0/2/7", - "testdata/0/0/2/70", - "testdata/0/0/2/71", - "testdata/0/0/2/72", - "testdata/0/0/2/73", - "testdata/0/0/2/74", - "testdata/0/0/2/75", - "testdata/0/0/2/76", - "testdata/0/0/2/77", - "testdata/0/0/2/78", - "testdata/0/0/2/79", - "testdata/0/0/2/8", - "testdata/0/0/2/80", - "testdata/0/0/2/81", - "testdata/0/0/2/82", - "testdata/0/0/2/83", - "testdata/0/0/2/84", - "testdata/0/0/2/85", - "testdata/0/0/2/86", - "testdata/0/0/2/87", - "testdata/0/0/2/88", - "testdata/0/0/2/89", - "testdata/0/0/2/9", - "testdata/0/0/2/90", - "testdata/0/0/2/91", - "testdata/0/0/2/92", - "testdata/0/0/2/93", - "testdata/0/0/2/94", - "testdata/0/0/2/95", - "testdata/0/0/2/96", - "testdata/0/0/2/97", - "testdata/0/0/2/98", - "testdata/0/0/2/99", - "testdata/0/0/2", - "testdata/0/0/3/0", - "testdata/0/0/3/1", - "testdata/0/0/3/10", - "testdata/0/0/3/100", - "testdata/0/0/3/101", - "testdata/0/0/3/102", - "testdata/0/0/3/103", - "testdata/0/0/3/104", - "testdata/0/0/3/105", - "testdata/0/0/3/106", - "testdata/0/0/3/107", - "testdata/0/0/3/108", - "testdata/0/0/3/109", - "testdata/0/0/3/11", - "testdata/0/0/3/110", - "testdata/0/0/3/111", - "testdata/0/0/3/112", - "testdata/0/0/3/113", - "testdata/0/0/3/114", - "testdata/0/0/3/115", - "testdata/0/0/3/116", - "testdata/0/0/3/117", - "testdata/0/0/3/118", - "testdata/0/0/3/119", - "testdata/0/0/3/12", - "testdata/0/0/3/120", - "testdata/0/0/3/121", - "testdata/0/0/3/122", - "testdata/0/0/3/123", - "testdata/0/0/3/124", - "testdata/0/0/3/125", - "testdata/0/0/3/126", - "testdata/0/0/3/127", - "testdata/0/0/3/13", - "testdata/0/0/3/14", - "testdata/0/0/3/15", - "testdata/0/0/3/16", - "testdata/0/0/3/17", - "testdata/0/0/3/18", - "testdata/0/0/3/19", - "testdata/0/0/3/2", - "testdata/0/0/3/20", - "testdata/0/0/3/21", - "testdata/0/0/3/22", - "testdata/0/0/3/23", - "testdata/0/0/3/24", - "testdata/0/0/3/25", - "testdata/0/0/3/26", - "testdata/0/0/3/27", - "testdata/0/0/3/28", - "testdata/0/0/3/29", - "testdata/0/0/3/3", - "testdata/0/0/3/30", - "testdata/0/0/3/31", - "testdata/0/0/3/32", - "testdata/0/0/3/33", - "testdata/0/0/3/34", - "testdata/0/0/3/35", - "testdata/0/0/3/36", - "testdata/0/0/3/37", - "testdata/0/0/3/38", - "testdata/0/0/3/39", - "testdata/0/0/3/4", - "testdata/0/0/3/40", - "testdata/0/0/3/41", - "testdata/0/0/3/42", - "testdata/0/0/3/43", - "testdata/0/0/3/44", - "testdata/0/0/3/45", - "testdata/0/0/3/46", - "testdata/0/0/3/47", - "testdata/0/0/3/48", - "testdata/0/0/3/49", - "testdata/0/0/3/5", - "testdata/0/0/3/50", - "testdata/0/0/3/51", - "testdata/0/0/3/52", - "testdata/0/0/3/53", - "testdata/0/0/3/54", - "testdata/0/0/3/55", - "testdata/0/0/3/56", - "testdata/0/0/3/57", - "testdata/0/0/3/58", - "testdata/0/0/3/59", - "testdata/0/0/3/6", - "testdata/0/0/3/60", - 
"testdata/0/0/3/61", - "testdata/0/0/3/62", - "testdata/0/0/3/63", - "testdata/0/0/3/64", - "testdata/0/0/3/65", - "testdata/0/0/3/66", - "testdata/0/0/3/67", - "testdata/0/0/3/68", - "testdata/0/0/3/69", - "testdata/0/0/3/7", - "testdata/0/0/3/70", - "testdata/0/0/3/71", - "testdata/0/0/3/72", - "testdata/0/0/3/73", - "testdata/0/0/3/74", - "testdata/0/0/3/75", - "testdata/0/0/3/76", - "testdata/0/0/3/77", - "testdata/0/0/3/78", - "testdata/0/0/3/79", - "testdata/0/0/3/8", - "testdata/0/0/3/80", - "testdata/0/0/3/81", - "testdata/0/0/3/82", - "testdata/0/0/3/83", - "testdata/0/0/3/84", - "testdata/0/0/3/85", - "testdata/0/0/3/86", - "testdata/0/0/3/87", - "testdata/0/0/3/88", - "testdata/0/0/3/89", - "testdata/0/0/3/9", - "testdata/0/0/3/90", - "testdata/0/0/3/91", - "testdata/0/0/3/92", - "testdata/0/0/3/93", - "testdata/0/0/3/94", - "testdata/0/0/3/95", - "testdata/0/0/3/96", - "testdata/0/0/3/97", - "testdata/0/0/3/98", - "testdata/0/0/3/99", - "testdata/0/0/3", - "testdata/0/0/4/0", - "testdata/0/0/4/1", - "testdata/0/0/4/10", - "testdata/0/0/4/100", - "testdata/0/0/4/101", - "testdata/0/0/4/102", - "testdata/0/0/4/103", - "testdata/0/0/4/104", - "testdata/0/0/4/105", - "testdata/0/0/4/106", - "testdata/0/0/4/107", - "testdata/0/0/4/108", - "testdata/0/0/4/109", - "testdata/0/0/4/11", - "testdata/0/0/4/110", - "testdata/0/0/4/111", - "testdata/0/0/4/112", - "testdata/0/0/4/113", - "testdata/0/0/4/114", - "testdata/0/0/4/115", - "testdata/0/0/4/116", - "testdata/0/0/4/117", - "testdata/0/0/4/118", - "testdata/0/0/4/119", - "testdata/0/0/4/12", - "testdata/0/0/4/120", - "testdata/0/0/4/121", - "testdata/0/0/4/122", - "testdata/0/0/4/123", - "testdata/0/0/4/124", - "testdata/0/0/4/125", - "testdata/0/0/4/126", - "testdata/0/0/4/127", - "testdata/0/0/4/13", - "testdata/0/0/4/14", - "testdata/0/0/4/15", - "testdata/0/0/4/16", - "testdata/0/0/4/17", - "testdata/0/0/4/18", - "testdata/0/0/4/19", - "testdata/0/0/4/2", - "testdata/0/0/4/20", - "testdata/0/0/4/21", - "testdata/0/0/4/22", - "testdata/0/0/4/23", - "testdata/0/0/4/24", - "testdata/0/0/4/25", - "testdata/0/0/4/26", - "testdata/0/0/4/27", - "testdata/0/0/4/28", - "testdata/0/0/4/29", - "testdata/0/0/4/3", - "testdata/0/0/4/30", - "testdata/0/0/4/31", - "testdata/0/0/4/32", - "testdata/0/0/4/33", - "testdata/0/0/4/34", - "testdata/0/0/4/35", - "testdata/0/0/4/36", - "testdata/0/0/4/37", - "testdata/0/0/4/38", - "testdata/0/0/4/39", - "testdata/0/0/4/4", - "testdata/0/0/4/40", - "testdata/0/0/4/41", - "testdata/0/0/4/42", - "testdata/0/0/4/43", - "testdata/0/0/4/44", - "testdata/0/0/4/45", - "testdata/0/0/4/46", - "testdata/0/0/4/47", - "testdata/0/0/4/48", - "testdata/0/0/4/49", - "testdata/0/0/4/5", - "testdata/0/0/4/50", - "testdata/0/0/4/51", - "testdata/0/0/4/52", - "testdata/0/0/4/53", - "testdata/0/0/4/54", - "testdata/0/0/4/55", - "testdata/0/0/4/56", - "testdata/0/0/4/57", - "testdata/0/0/4/58", - "testdata/0/0/4/59", - "testdata/0/0/4/6", - "testdata/0/0/4/60", - "testdata/0/0/4/61", - "testdata/0/0/4/62", - "testdata/0/0/4/63", - "testdata/0/0/4/64", - "testdata/0/0/4/65", - "testdata/0/0/4/66", - "testdata/0/0/4/67", - "testdata/0/0/4/68", - "testdata/0/0/4/69", - "testdata/0/0/4/7", - "testdata/0/0/4/70", - "testdata/0/0/4/71", - "testdata/0/0/4/72", - "testdata/0/0/4/73", - "testdata/0/0/4/74", - "testdata/0/0/4/75", - "testdata/0/0/4/76", - "testdata/0/0/4/77", - "testdata/0/0/4/78", - "testdata/0/0/4/79", - "testdata/0/0/4/8", - "testdata/0/0/4/80", - "testdata/0/0/4/81", - "testdata/0/0/4/82", - "testdata/0/0/4/83", - 
"testdata/0/0/4/84", - "testdata/0/0/4/85", - "testdata/0/0/4/86", - "testdata/0/0/4/87", - "testdata/0/0/4/88", - "testdata/0/0/4/89", - "testdata/0/0/4/9", - "testdata/0/0/4/90", - "testdata/0/0/4/91", - "testdata/0/0/4/92", - "testdata/0/0/4/93", - "testdata/0/0/4/94", - "testdata/0/0/4/95", - "testdata/0/0/4/96", - "testdata/0/0/4/97", - "testdata/0/0/4/98", - "testdata/0/0/4/99", - "testdata/0/0/4", - "testdata/0/0/5/0", - "testdata/0/0/5/1", - "testdata/0/0/5/10", - "testdata/0/0/5/100", - "testdata/0/0/5/101", - "testdata/0/0/5/102", - "testdata/0/0/5/103", - "testdata/0/0/5/104", - "testdata/0/0/5/105", - "testdata/0/0/5/106", - "testdata/0/0/5/107", - "testdata/0/0/5/108", - "testdata/0/0/5/109", - "testdata/0/0/5/11", - "testdata/0/0/5/110", - "testdata/0/0/5/111", - "testdata/0/0/5/112", - "testdata/0/0/5/113", - "testdata/0/0/5/114", - "testdata/0/0/5/115", - "testdata/0/0/5/116", - "testdata/0/0/5/117", - "testdata/0/0/5/118", - "testdata/0/0/5/119", - "testdata/0/0/5/12", - "testdata/0/0/5/120", - "testdata/0/0/5/121", - "testdata/0/0/5/122", - "testdata/0/0/5/123", - "testdata/0/0/5/124", - "testdata/0/0/5/125", - "testdata/0/0/5/126", - "testdata/0/0/5/127", - "testdata/0/0/5/13", - "testdata/0/0/5/14", - "testdata/0/0/5/15", - "testdata/0/0/5/16", - "testdata/0/0/5/17", - "testdata/0/0/5/18", - "testdata/0/0/5/19", - "testdata/0/0/5/2", - "testdata/0/0/5/20", - "testdata/0/0/5/21", - "testdata/0/0/5/22", - "testdata/0/0/5/23", - "testdata/0/0/5/24", - "testdata/0/0/5/25", - "testdata/0/0/5/26", - "testdata/0/0/5/27", - "testdata/0/0/5/28", - "testdata/0/0/5/29", - "testdata/0/0/5/3", - "testdata/0/0/5/30", - "testdata/0/0/5/31", - "testdata/0/0/5/32", - "testdata/0/0/5/33", - "testdata/0/0/5/34", - "testdata/0/0/5/35", - "testdata/0/0/5/36", - "testdata/0/0/5/37", - "testdata/0/0/5/38", - "testdata/0/0/5/39", - "testdata/0/0/5/4", - "testdata/0/0/5/40", - "testdata/0/0/5/41", - "testdata/0/0/5/42", - "testdata/0/0/5/43", - "testdata/0/0/5/44", - "testdata/0/0/5/45", - "testdata/0/0/5/46", - "testdata/0/0/5/47", - "testdata/0/0/5/48", - "testdata/0/0/5/49", - "testdata/0/0/5/5", - "testdata/0/0/5/50", - "testdata/0/0/5/51", - "testdata/0/0/5/52", - "testdata/0/0/5/53", - "testdata/0/0/5/54", - "testdata/0/0/5/55", - "testdata/0/0/5/56", - "testdata/0/0/5/57", - "testdata/0/0/5/58", - "testdata/0/0/5/59", - "testdata/0/0/5/6", - "testdata/0/0/5/60", - "testdata/0/0/5/61", - "testdata/0/0/5/62", - "testdata/0/0/5/63", - "testdata/0/0/5/64", - "testdata/0/0/5/65", - "testdata/0/0/5/66", - "testdata/0/0/5/67", - "testdata/0/0/5/68", - "testdata/0/0/5/69", - "testdata/0/0/5/7", - "testdata/0/0/5/70", - "testdata/0/0/5/71", - "testdata/0/0/5/72", - "testdata/0/0/5/73", - "testdata/0/0/5/74", - "testdata/0/0/5/75", - "testdata/0/0/5/76", - "testdata/0/0/5/77", - "testdata/0/0/5/78", - "testdata/0/0/5/79", - "testdata/0/0/5/8", - "testdata/0/0/5/80", - "testdata/0/0/5/81", - "testdata/0/0/5/82", - "testdata/0/0/5/83", - "testdata/0/0/5/84", - "testdata/0/0/5/85", - "testdata/0/0/5/86", - "testdata/0/0/5/87", - "testdata/0/0/5/88", - "testdata/0/0/5/89", - "testdata/0/0/5/9", - "testdata/0/0/5/90", - "testdata/0/0/5/91", - "testdata/0/0/5/92", - "testdata/0/0/5/93", - "testdata/0/0/5/94", - "testdata/0/0/5/95", - "testdata/0/0/5/96", - "testdata/0/0/5/97", - "testdata/0/0/5/98", - "testdata/0/0/5/99", - "testdata/0/0/5", - "testdata/0/0/6/0", - "testdata/0/0/6/1", - "testdata/0/0/6/10", - "testdata/0/0/6/100", - "testdata/0/0/6/101", - "testdata/0/0/6/102", - "testdata/0/0/6/103", - 
"testdata/0/0/6/104", - "testdata/0/0/6/105", - "testdata/0/0/6/106", - "testdata/0/0/6/107", - "testdata/0/0/6/108", - "testdata/0/0/6/109", - "testdata/0/0/6/11", - "testdata/0/0/6/110", - "testdata/0/0/6/111", - "testdata/0/0/6/112", - "testdata/0/0/6/113", - "testdata/0/0/6/114", - "testdata/0/0/6/115", - "testdata/0/0/6/116", - "testdata/0/0/6/117", - "testdata/0/0/6/118", - "testdata/0/0/6/119", - "testdata/0/0/6/12", - "testdata/0/0/6/120", - "testdata/0/0/6/121", - "testdata/0/0/6/122", - "testdata/0/0/6/123", - "testdata/0/0/6/124", - "testdata/0/0/6/125", - "testdata/0/0/6/126", - "testdata/0/0/6/127", - "testdata/0/0/6/13", - "testdata/0/0/6/14", - "testdata/0/0/6/15", - "testdata/0/0/6/16", - "testdata/0/0/6/17", - "testdata/0/0/6/18", - "testdata/0/0/6/19", - "testdata/0/0/6/2", - "testdata/0/0/6/20", - "testdata/0/0/6/21", - "testdata/0/0/6/22", - "testdata/0/0/6/23", - "testdata/0/0/6/24", - "testdata/0/0/6/25", - "testdata/0/0/6/26", - "testdata/0/0/6/27", - "testdata/0/0/6/28", - "testdata/0/0/6/29", - "testdata/0/0/6/3", - "testdata/0/0/6/30", - "testdata/0/0/6/31", - "testdata/0/0/6/32", - "testdata/0/0/6/33", - "testdata/0/0/6/34", - "testdata/0/0/6/35", - "testdata/0/0/6/36", - "testdata/0/0/6/37", - "testdata/0/0/6/38", - "testdata/0/0/6/39", - "testdata/0/0/6/4", - "testdata/0/0/6/40", - "testdata/0/0/6/41", - "testdata/0/0/6/42", - "testdata/0/0/6/43", - "testdata/0/0/6/44", - "testdata/0/0/6/45", - "testdata/0/0/6/46", - "testdata/0/0/6/47", - "testdata/0/0/6/48", - "testdata/0/0/6/49", - "testdata/0/0/6/5", - "testdata/0/0/6/50", - "testdata/0/0/6/51", - "testdata/0/0/6/52", - "testdata/0/0/6/53", - "testdata/0/0/6/54", - "testdata/0/0/6/55", - "testdata/0/0/6/56", - "testdata/0/0/6/57", - "testdata/0/0/6/58", - "testdata/0/0/6/59", - "testdata/0/0/6/6", - "testdata/0/0/6/60", - "testdata/0/0/6/61", - "testdata/0/0/6/62", - "testdata/0/0/6/63", - "testdata/0/0/6/64", - "testdata/0/0/6/65", - "testdata/0/0/6/66", - "testdata/0/0/6/67", - "testdata/0/0/6/68", - "testdata/0/0/6/69", - "testdata/0/0/6/7", - "testdata/0/0/6/70", - "testdata/0/0/6/71", - "testdata/0/0/6/72", - "testdata/0/0/6/73", - "testdata/0/0/6/74", - "testdata/0/0/6/75", - "testdata/0/0/6/76", - "testdata/0/0/6/77", - "testdata/0/0/6/78", - "testdata/0/0/6/79", - "testdata/0/0/6/8", - "testdata/0/0/6/80", - "testdata/0/0/6/81", - "testdata/0/0/6/82", - "testdata/0/0/6/83", - "testdata/0/0/6/84", - "testdata/0/0/6/85", - "testdata/0/0/6/86", - "testdata/0/0/6/87", - "testdata/0/0/6/88", - "testdata/0/0/6/89", - "testdata/0/0/6/9", - "testdata/0/0/6/90", - "testdata/0/0/6/91", - "testdata/0/0/6/92", - "testdata/0/0/6/93", - "testdata/0/0/6/94", - "testdata/0/0/6/95", - "testdata/0/0/6/96", - "testdata/0/0/6/97", - "testdata/0/0/6/98", - "testdata/0/0/6/99", - "testdata/0/0/6", - "testdata/0/0/7/0", - "testdata/0/0/7/1", - "testdata/0/0/7/10", - "testdata/0/0/7/100", - "testdata/0/0/7/101", - "testdata/0/0/7/102", - "testdata/0/0/7/103", - "testdata/0/0/7/104", - "testdata/0/0/7/105", - "testdata/0/0/7/106", - "testdata/0/0/7/107", - "testdata/0/0/7/108", - "testdata/0/0/7/109", - "testdata/0/0/7/11", - "testdata/0/0/7/110", - "testdata/0/0/7/111", - "testdata/0/0/7/112", - "testdata/0/0/7/113", - "testdata/0/0/7/114", - "testdata/0/0/7/115", - "testdata/0/0/7/116", - "testdata/0/0/7/117", - "testdata/0/0/7/118", - "testdata/0/0/7/119", - "testdata/0/0/7/12", - "testdata/0/0/7/120", - "testdata/0/0/7/121", - "testdata/0/0/7/122", - "testdata/0/0/7/123", - "testdata/0/0/7/124", - "testdata/0/0/7/125", 
- "testdata/0/0/7/126", - "testdata/0/0/7/127", - "testdata/0/0/7/13", - "testdata/0/0/7/14", - "testdata/0/0/7/15", - "testdata/0/0/7/16", - "testdata/0/0/7/17", - "testdata/0/0/7/18", - "testdata/0/0/7/19", - "testdata/0/0/7/2", - "testdata/0/0/7/20", - "testdata/0/0/7/21", - "testdata/0/0/7/22", - "testdata/0/0/7/23", - "testdata/0/0/7/24", - "testdata/0/0/7/25", - "testdata/0/0/7/26", - "testdata/0/0/7/27", - "testdata/0/0/7/28", - "testdata/0/0/7/29", - "testdata/0/0/7/3", - "testdata/0/0/7/30", - "testdata/0/0/7/31", - "testdata/0/0/7/32", - "testdata/0/0/7/33", - "testdata/0/0/7/34", - "testdata/0/0/7/35", - "testdata/0/0/7/36", - "testdata/0/0/7/37", - "testdata/0/0/7/38", - "testdata/0/0/7/39", - "testdata/0/0/7/4", - "testdata/0/0/7/40", - "testdata/0/0/7/41", - "testdata/0/0/7/42", - "testdata/0/0/7/43", - "testdata/0/0/7/44", - "testdata/0/0/7/45", - "testdata/0/0/7/46", - "testdata/0/0/7/47", - "testdata/0/0/7/48", - "testdata/0/0/7/49", - "testdata/0/0/7/5", - "testdata/0/0/7/50", - "testdata/0/0/7/51", - "testdata/0/0/7/52", - "testdata/0/0/7/53", - "testdata/0/0/7/54", - "testdata/0/0/7/55", - "testdata/0/0/7/56", - "testdata/0/0/7/57", - "testdata/0/0/7/58", - "testdata/0/0/7/59", - "testdata/0/0/7/6", - "testdata/0/0/7/60", - "testdata/0/0/7/61", - "testdata/0/0/7/62", - "testdata/0/0/7/63", - "testdata/0/0/7/64", - "testdata/0/0/7/65", - "testdata/0/0/7/66", - "testdata/0/0/7/67", - "testdata/0/0/7/68", - "testdata/0/0/7/69", - "testdata/0/0/7/7", - "testdata/0/0/7/70", - "testdata/0/0/7/71", - "testdata/0/0/7/72", - "testdata/0/0/7/73", - "testdata/0/0/7/74", - "testdata/0/0/7/75", - "testdata/0/0/7/76", - "testdata/0/0/7/77", - "testdata/0/0/7/78", - "testdata/0/0/7/79", - "testdata/0/0/7/8", - "testdata/0/0/7/80", - "testdata/0/0/7/81", - "testdata/0/0/7/82", - "testdata/0/0/7/83", - "testdata/0/0/7/84", - "testdata/0/0/7/85", - "testdata/0/0/7/86", - "testdata/0/0/7/87", - "testdata/0/0/7/88", - "testdata/0/0/7/89", - "testdata/0/0/7/9", - "testdata/0/0/7/90", - "testdata/0/0/7/91", - "testdata/0/0/7/92", - "testdata/0/0/7/93", - "testdata/0/0/7/94", - "testdata/0/0/7/95", - "testdata/0/0/7/96", - "testdata/0/0/7/97", - "testdata/0/0/7/98", - "testdata/0/0/7/99", - "testdata/0/0/7", - "testdata/0/0/8/0", - "testdata/0/0/8/1", - "testdata/0/0/8/10", - "testdata/0/0/8/100", - "testdata/0/0/8/101", - "testdata/0/0/8/102", - "testdata/0/0/8/103", - "testdata/0/0/8/104", - "testdata/0/0/8/105", - "testdata/0/0/8/106", - "testdata/0/0/8/107", - "testdata/0/0/8/108", - "testdata/0/0/8/109", - "testdata/0/0/8/11", - "testdata/0/0/8/110", - "testdata/0/0/8/111", - "testdata/0/0/8/112", - "testdata/0/0/8/113", - "testdata/0/0/8/114", - "testdata/0/0/8/115", - "testdata/0/0/8/116", - "testdata/0/0/8/117", - "testdata/0/0/8/118", - "testdata/0/0/8/119", - "testdata/0/0/8/12", - "testdata/0/0/8/120", - "testdata/0/0/8/121", - "testdata/0/0/8/122", - "testdata/0/0/8/123", - "testdata/0/0/8/124", - "testdata/0/0/8/125", - "testdata/0/0/8/126", - "testdata/0/0/8/127", - "testdata/0/0/8/13", - "testdata/0/0/8/14", - "testdata/0/0/8/15", - "testdata/0/0/8/16", - "testdata/0/0/8/17", - "testdata/0/0/8/18", - "testdata/0/0/8/19", - "testdata/0/0/8/2", - "testdata/0/0/8/20", - "testdata/0/0/8/21", - "testdata/0/0/8/22", - "testdata/0/0/8/23", - "testdata/0/0/8/24", - "testdata/0/0/8/25", - "testdata/0/0/8/26", - "testdata/0/0/8/27", - "testdata/0/0/8/28", - "testdata/0/0/8/29", - "testdata/0/0/8/3", - "testdata/0/0/8/30", - "testdata/0/0/8/31", - "testdata/0/0/8/32", - 
"testdata/0/0/8/33", - "testdata/0/0/8/34", - "testdata/0/0/8/35", - "testdata/0/0/8/36", - "testdata/0/0/8/37", - "testdata/0/0/8/38", - "testdata/0/0/8/39", - "testdata/0/0/8/4", - "testdata/0/0/8/40", - "testdata/0/0/8/41", - "testdata/0/0/8/42", - "testdata/0/0/8/43", - "testdata/0/0/8/44", - "testdata/0/0/8/45", - "testdata/0/0/8/46", - "testdata/0/0/8/47", - "testdata/0/0/8/48", - "testdata/0/0/8/49", - "testdata/0/0/8/5", - "testdata/0/0/8/50", - "testdata/0/0/8/51", - "testdata/0/0/8/52", - "testdata/0/0/8/53", - "testdata/0/0/8/54", - "testdata/0/0/8/55", - "testdata/0/0/8/56", - "testdata/0/0/8/57", - "testdata/0/0/8/58", - "testdata/0/0/8/59", - "testdata/0/0/8/6", - "testdata/0/0/8/60", - "testdata/0/0/8/61", - "testdata/0/0/8/62", - "testdata/0/0/8/63", - "testdata/0/0/8/64", - "testdata/0/0/8/65", - "testdata/0/0/8/66", - "testdata/0/0/8/67", - "testdata/0/0/8/68", - "testdata/0/0/8/69", - "testdata/0/0/8/7", - "testdata/0/0/8/70", - "testdata/0/0/8/71", - "testdata/0/0/8/72", - "testdata/0/0/8/73", - "testdata/0/0/8/74", - "testdata/0/0/8/75", - "testdata/0/0/8/76", - "testdata/0/0/8/77", - "testdata/0/0/8/78", - "testdata/0/0/8/79", - "testdata/0/0/8/8", - "testdata/0/0/8/80", - "testdata/0/0/8/81", - "testdata/0/0/8/82", - "testdata/0/0/8/83", - "testdata/0/0/8/84", - "testdata/0/0/8/85", - "testdata/0/0/8/86", - "testdata/0/0/8/87", - "testdata/0/0/8/88", - "testdata/0/0/8/89", - "testdata/0/0/8/9", - "testdata/0/0/8/90", - "testdata/0/0/8/91", - "testdata/0/0/8/92", - "testdata/0/0/8/93", - "testdata/0/0/8/94", - "testdata/0/0/8/95", - "testdata/0/0/8/96", - "testdata/0/0/8/97", - "testdata/0/0/8/98", - "testdata/0/0/8/99", - "testdata/0/0/8", - "testdata/0/0/9/0", - "testdata/0/0/9/1", - "testdata/0/0/9/10", - "testdata/0/0/9/11", - "testdata/0/0/9/12", - "testdata/0/0/9/13", - "testdata/0/0/9/14", - "testdata/0/0/9/15", - "testdata/0/0/9/16", - "testdata/0/0/9/17", - "testdata/0/0/9/18", - "testdata/0/0/9/19", - "testdata/0/0/9/2", - "testdata/0/0/9/20", - "testdata/0/0/9/21", - "testdata/0/0/9/22", - "testdata/0/0/9/23", - "testdata/0/0/9/24", - "testdata/0/0/9/25", - "testdata/0/0/9/26", - "testdata/0/0/9/27", - "testdata/0/0/9/28", - "testdata/0/0/9/29", - "testdata/0/0/9/3", - "testdata/0/0/9/30", - "testdata/0/0/9/31", - "testdata/0/0/9/32", - "testdata/0/0/9/33", - "testdata/0/0/9/34", - "testdata/0/0/9/35", - "testdata/0/0/9/36", - "testdata/0/0/9/37", - "testdata/0/0/9/38", - "testdata/0/0/9/39", - "testdata/0/0/9/4", - "testdata/0/0/9/40", - "testdata/0/0/9/41", - "testdata/0/0/9/42", - "testdata/0/0/9/43", - "testdata/0/0/9/44", - "testdata/0/0/9/45", - "testdata/0/0/9/46", - "testdata/0/0/9/47", - "testdata/0/0/9/48", - "testdata/0/0/9/49", - "testdata/0/0/9/5", - "testdata/0/0/9/50", - "testdata/0/0/9/51", - "testdata/0/0/9/52", - "testdata/0/0/9/53", - "testdata/0/0/9/54", - "testdata/0/0/9/55", - "testdata/0/0/9/56", - "testdata/0/0/9/57", - "testdata/0/0/9/58", - "testdata/0/0/9/59", - "testdata/0/0/9/6", - "testdata/0/0/9/60", - "testdata/0/0/9/61", - "testdata/0/0/9/62", - "testdata/0/0/9/63", - "testdata/0/0/9/64", - "testdata/0/0/9/65", - "testdata/0/0/9/66", - "testdata/0/0/9/67", - "testdata/0/0/9/68", - "testdata/0/0/9/7", - "testdata/0/0/9/8", - "testdata/0/0/9/9", - "testdata/0/0/9", - "testdata/0/0", - "testdata/0", - "testdata", - "", -} - -func TestDelayedWalkTree(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) - defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) - rtest.OK(t, repo.LoadIndex(context.TODO())) - - 
root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - rtest.OK(t, err) - - dr := delayRepo{repo, 100 * time.Millisecond} - - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(context.TODO(), dr, root, treeJobs) - - i := 0 - for job := range treeJobs { - expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...) - if job.Path != expectedPath { - t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path) - } - i++ - } - - if i != len(walktreeTestItems) { - t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems)) - } -} - -func BenchmarkDelayedWalkTree(t *testing.B) { - repodir, cleanup := rtest.Env(t, repoFixture) - defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) - rtest.OK(t, repo.LoadIndex(context.TODO())) - - root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - rtest.OK(t, err) - - dr := delayRepo{repo, 10 * time.Millisecond} - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(context.TODO(), dr, root, treeJobs) - - for range treeJobs { - } - } -}
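
As a minimal sketch of how consumers drove the walker removed above: like TestDelayedWalkTree, a caller starts walk.Tree in a goroutine and drains the TreeJob channel until walk.Tree closes it. The printSnapshotTree helper below is hypothetical and not part of the deleted code; only the walk.Tree signature, the TreeLoader interface, and the TreeJob fields are taken from this diff, and the snippet compiles only inside the restic source tree, since the packages are internal.

package walk_test

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/walk"
)

// printSnapshotTree is a hypothetical consumer of the removed API. walk.Tree
// starts its own loader workers, sends one TreeJob per file and directory
// below root, and closes the channel when the walk is finished.
func printSnapshotTree(ctx context.Context, repo walk.TreeLoader, root restic.ID) {
	jobs := make(chan walk.TreeJob)
	go walk.Tree(ctx, repo, root, jobs)

	for job := range jobs {
		switch {
		case job.Error != nil:
			// errors are delivered per path; the walk itself continues
			fmt.Printf("%s: error: %v\n", job.Path, job.Error)
		case job.Tree != nil:
			fmt.Printf("dir  %s (%d entries)\n", job.Path, len(job.Tree.Nodes))
		case job.Node != nil:
			fmt.Printf("file %s (%d bytes)\n", job.Path, job.Node.Size)
		}
	}
}

Because walk.Tree only closes the job channel once the walk completes, a consumer has to either drain the channel to the end or cancel the context; BenchmarkDelayedWalkTree above does the former with an empty for range loop.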