Compare commits

...

10 Commits

Author SHA1 Message Date
ducalex 2050f648f7
Merge 166e94d82e into e4f9bce384 2024-05-09 21:27:10 -04:00
Michael Eischer e4f9bce384
Merge pull request #4792 from restic/request-watchdog
backend: enforce that backend HTTP requests make progress
2024-05-09 23:55:30 +02:00
Michael Eischer 3740700ddc add http timeouts to changelog 2024-05-09 23:46:17 +02:00
Michael Eischer ebd01a4675 backend: add tests for watchdogRoundTripper 2024-05-09 23:46:17 +02:00
Michael Eischer 8778670232 backend: cancel stuck http requests
requests that make no upload or download progress within a timeout are
canceled.
2024-05-09 23:46:17 +02:00
Michael Eischer 0987c731ec backend: configure protocol-level connection health checks
This should detect a connection that is stuck for more than 2 minutes.
2024-05-09 23:46:17 +02:00
Alex Duchesne 166e94d82e webdav: removed unnecessary error checks 2024-04-07 17:35:11 -04:00
Alex Duchesne 0cb3ef1d92 webdav: added some structure similar to `mount` 2024-04-06 12:10:49 -04:00
Alex Duchesne 7782b7c38e webdav: open Windows Explorer when starting 2024-04-06 10:23:59 -04:00
Alex Duchesne ff7bfc534f Implemented WebDAV server to browse repositories 2024-04-05 23:21:24 -04:00
6 changed files with 804 additions and 2 deletions

View File

@ -4,5 +4,14 @@ Restic now downloads pack files in large chunks instead of using a streaming
download. This prevents failures due to interrupted streams. The `restore`
command now also retries downloading individual blobs that cannot be retrieved.
HTTP requests that are stuck for more than two minutes while uploading or
downloading are now forcibly interrupted. This ensures that stuck requests are
retried after a short timeout. These new request timeouts can temporarily be
disabled by setting the environment variable
`RESTIC_FEATURES=http-timeouts=false`. Note that this feature flag will be
removed in the next minor restic version.
https://github.com/restic/restic/issues/4627
https://github.com/restic/restic/issues/4193
https://github.com/restic/restic/pull/4605
https://github.com/restic/restic/pull/4792

469
cmd/restic/cmd_webdav.go Normal file
View File

@ -0,0 +1,469 @@
package main
import (
"context"
"io"
"net/http"
"os"
"os/exec"
"path"
"runtime"
"sort"
"strings"
"time"
"github.com/spf13/cobra"
"golang.org/x/net/webdav"
"github.com/restic/restic/internal/bloblru"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/walker"
)
// cmdWebdav defines the "webdav" command, which serves the repository as a
// read-only WebDAV share. The actual work happens in runWebServer.
var cmdWebdav = &cobra.Command{
	Use:   "webdav [flags] [ip:port]",
	Short: "Serve the repository via WebDAV",
	Long: `
The "webdav" command serves the repository via WebDAV. This is a
read-only mount.
Snapshot Directories
====================
If you need a different template for directories that contain snapshots,
you can pass a time template via --time-template and path templates via
--path-template.
Example time template without colons:
--time-template "2006-01-02_15-04-05"
You need to specify a sample format for exactly the following timestamp:
Mon Jan 2 15:04:05 -0700 MST 2006
For details please see the documentation for time.Format() at:
https://godoc.org/time#Time.Format
For path templates, you can use the following patterns which will be replaced:
%i by short snapshot ID
%I by long snapshot ID
%u by username
%h by hostname
%t by tags
%T by timestamp as specified by --time-template
The default path templates are:
"ids/%i"
"snapshots/%T"
"hosts/%h/%T"
"tags/%t/%T"
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
	DisableAutoGenTag: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runWebServer(cmd.Context(), webdavOptions, globalOptions, args)
	},
}
// WebdavOptions collects all options of the webdav command.
type WebdavOptions struct {
	restic.SnapshotFilter
	// TimeTemplate is the time.Format layout used to render snapshot
	// timestamps in directory paths (the %T pattern).
	TimeTemplate string
	// PathTemplates is accepted on the command line but currently ignored;
	// see the comment in runWebServer.
	PathTemplates []string
}

// webdavOptions holds the flag values for the webdav command.
var webdavOptions WebdavOptions
// init registers the webdav command and its flags.
func init() {
	cmdRoot.AddCommand(cmdWebdav)
	cmdFlags := cmdWebdav.Flags()
	initMultiSnapshotFilter(cmdFlags, &webdavOptions.SnapshotFilter, true)
	cmdFlags.StringArrayVar(&webdavOptions.PathTemplates, "path-template", nil, "set `template` for path names (can be specified multiple times)")
	// The flag must be called "time-template": the command's help text and the
	// default path templates both document --time-template, but the flag was
	// registered under the name "snapshot-template".
	cmdFlags.StringVar(&webdavOptions.TimeTemplate, "time-template", "2006-01-02_15-04-05", "set `template` to use for snapshot dirs")
}
// runWebServer serves the repository read-only via WebDAV on the given bind
// address (default 127.0.0.1:3080). It loads the repository index, registers
// every matching snapshot in the virtual filesystem, and then blocks serving
// HTTP requests until the process is interrupted.
func runWebServer(ctx context.Context, opts WebdavOptions, gopts GlobalOptions, args []string) error {
	if len(args) > 1 {
		return errors.Fatal("wrong number of parameters")
	}

	// FIXME: Proper validation, also add support for IPv6
	bindAddress := "127.0.0.1:3080"
	if len(args) == 1 {
		bindAddress = strings.ToLower(args[0])
	}
	// also accept an "http://host:port" style argument
	bindAddress = strings.TrimPrefix(bindAddress, "http://")

	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
	if err != nil {
		return err
	}
	defer unlock()

	snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
	if err != nil {
		return err
	}

	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
	err = repo.LoadIndex(ctx, bar)
	if err != nil {
		return err
	}

	davFS := &webdavFS{
		repo: repo,
		root: webdavFSNode{
			name:     "",
			mode:     0555 | os.ModeDir,
			modTime:  time.Now(),
			children: make(map[string]*webdavFSNode),
		},
		blobCache: bloblru.New(64 << 20),
	}

	wd := &webdav.Handler{
		FileSystem: davFS,
		LockSystem: webdav.NewMemLS(),
	}

	// Use the filter from the opts parameter instead of the package-level
	// webdavOptions so the function only depends on what it is given.
	for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, nil) {
		node := &webdavFSNode{
			name:     sn.ID().Str(),
			mode:     0555 | os.ModeDir,
			modTime:  sn.Time,
			children: nil,
			snapshot: sn,
		}
		// Ignore PathTemplates for now because `fuse.snapshots_dir(struct)` is not accessible when building
		// on Windows and it would be ridiculous to duplicate the code. It should be shared, somehow!
		davFS.addNode("/ids/"+node.name, node)
		davFS.addNode("/hosts/"+sn.Hostname+"/"+node.name, node)
		davFS.addNode("/snapshots/"+sn.Time.Format(opts.TimeTemplate)+"/"+node.name, node)
		for _, tag := range sn.Tags {
			davFS.addNode("/tags/"+tag+"/"+node.name, node)
		}
	}

	Printf("Now serving the repository at http://%s\n", bindAddress)
	Printf("Tree contains %d snapshots\n", len(davFS.root.children))
	Printf("When finished, quit with Ctrl-c here.\n")

	// FIXME: Remove before PR, this is handy for testing but likely undesirable :)
	if runtime.GOOS == "windows" {
		browseURL := "\\\\" + strings.Replace(bindAddress, ":", "@", 1) + "\\DavWWWRoot"
		// opening the explorer window is best-effort only, but the error must
		// not be silently discarded
		if err := exec.Command("explorer", browseURL).Start(); err != nil {
			debug.Log("unable to start explorer: %v", err)
		}
	}

	return http.ListenAndServe(bindAddress, wd)
}
// webdavFS implements webdav.FileSystem on top of a restic repository. The
// tree of webdavFSNode entries is created up-front for each snapshot root and
// filled in lazily (via loadSnapshot) when a snapshot is first listed.
type webdavFS struct {
	repo restic.Repository
	root webdavFSNode
	// snapshots *restic.Snapshot
	// blobCache holds recently decoded data blobs to avoid re-reading them
	// from the repository on sequential reads.
	blobCache *bloblru.Cache
}
// webdavFSNode is a single entry in the virtual filesystem tree and implements
// os.FileInfo. Directories keep their entries in children; file entries carry
// the restic node they were created from, snapshot roots carry the snapshot.
type webdavFSNode struct {
	name    string
	mode    os.FileMode
	modTime time.Time
	// size is the byte size for files (taken from the restic node) and the
	// number of direct children for directories (maintained by addNode).
	size     int64
	children map[string]*webdavFSNode
	// Should be an interface to save on memory?
	node     *restic.Node
	snapshot *restic.Snapshot
}

// os.FileInfo implementation.
func (f *webdavFSNode) Name() string       { return f.name }
func (f *webdavFSNode) Size() int64        { return f.size }
func (f *webdavFSNode) Mode() os.FileMode  { return f.mode }
func (f *webdavFSNode) ModTime() time.Time { return f.modTime }
func (f *webdavFSNode) IsDir() bool        { return f.mode.IsDir() }
func (f *webdavFSNode) Sys() interface{}   { return nil }
// loadSnapshot walks the snapshot's tree and inserts a node for every entry
// below mountPoint. Errors returned by addNode are ignored; walk errors are
// propagated through the visitor's return value and abort the walk.
func (fs *webdavFS) loadSnapshot(ctx context.Context, mountPoint string, sn *restic.Snapshot) {
	Printf("Loading snapshot %s at %s\n", sn.ID().Str(), mountPoint)
	// FIXME: Need a mutex here...
	// FIXME: All this walking should be done dynamically when the client asks for a folder...
	walker.Walk(ctx, fs.repo, *sn.Tree, walker.WalkVisitor{
		ProcessNode: func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) error {
			// propagate walk errors; skip entries without node data
			if err != nil || node == nil {
				return err
			}
			fs.addNode(mountPoint+"/"+nodepath, &webdavFSNode{
				name:    node.Name,
				mode:    node.Mode,
				modTime: node.ModTime,
				size:    int64(node.Size),
				node:    node,
				// snapshot: sn,
			})
			return nil
		},
	})
}
// addNode inserts node at fullpath, creating intermediate directories as
// needed. The path is interpreted relative to the filesystem root; an empty
// or root path is rejected. A directory's size is kept equal to the number of
// its direct children so Readdir can paginate over it.
func (fs *webdavFS) addNode(fullpath string, node *webdavFSNode) error {
	fullpath = strings.Trim(path.Clean("/"+fullpath), "/")
	if fullpath == "" {
		return os.ErrInvalid
	}

	parts := strings.Split(fullpath, "/")
	dir := &fs.root

	for len(parts) > 0 {
		part := parts[0]
		parts = parts[1:]

		if !dir.IsDir() {
			return os.ErrInvalid
		}
		if dir.children == nil {
			dir.children = make(map[string]*webdavFSNode)
		}

		// last component: attach the node itself
		if len(parts) == 0 {
			dir.children[part] = node
			dir.size = int64(len(dir.children))
			return nil
		}

		if dir.children[part] == nil {
			dir.children[part] = &webdavFSNode{
				name:     part,
				mode:     0555 | os.ModeDir,
				modTime:  dir.modTime,
				children: nil,
			}
			// Keep the parent's size in sync when an intermediate directory
			// is created, not only when a leaf is attached. Otherwise
			// directories that only contain subdirectories report size 0.
			dir.size = int64(len(dir.children))
		}
		dir = dir.children[part]
	}
	return os.ErrInvalid
}
// findNode resolves fullname to a node in the tree. It returns os.ErrNotExist
// if any path component is missing. The empty path resolves to the root.
func (fs *webdavFS) findNode(fullname string) (*webdavFSNode, error) {
	fullname = strings.Trim(path.Clean("/"+fullname), "/")
	if fullname == "" {
		return &fs.root, nil
	}

	parts := strings.Split(fullname, "/")
	dir := &fs.root

	for dir != nil {
		node := dir.children[parts[0]]
		parts = parts[1:]
		if len(parts) == 0 {
			// Previously a missing final component returned (nil, nil), which
			// handed callers a nil node without any error to check.
			if node == nil {
				return nil, os.ErrNotExist
			}
			return node, nil
		}
		dir = node
	}
	return nil, os.ErrNotExist
}
// OpenFile implements webdav.FileSystem. The filesystem is read-only, so any
// request for write access is rejected with os.ErrPermission.
func (fs *webdavFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
	debug.Log("OpenFile %s", name)

	// Client can only read
	if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
		return nil, os.ErrPermission
	}

	node, err := fs.findNode(name)
	if err == os.ErrNotExist {
		// FIXME: Walk up the tree to make sure the snapshot (if any) is loaded
	}
	if err != nil {
		return nil, err
	}
	// Defend against a nil node slipping through findNode; returning an
	// openFile with a nil node would panic on the first Read/Readdir.
	if node == nil {
		return nil, os.ErrNotExist
	}

	return &openFile{fullpath: path.Clean("/" + name), node: node, fs: fs}, nil
}
// Stat implements webdav.FileSystem by resolving name to its FileInfo.
func (fs *webdavFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
	node, err := fs.findNode(name)
	if err != nil {
		return nil, err
	}
	// Avoid returning a non-nil os.FileInfo interface that wraps a nil
	// *webdavFSNode — callers would see info != nil and then panic on use.
	if node == nil {
		return nil, os.ErrNotExist
	}
	return node, nil
}
// Mkdir implements webdav.FileSystem; the filesystem is read-only.
func (fs *webdavFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
	return os.ErrPermission
}

// RemoveAll implements webdav.FileSystem; the filesystem is read-only.
func (fs *webdavFS) RemoveAll(ctx context.Context, name string) error {
	return os.ErrPermission
}

// Rename implements webdav.FileSystem; the filesystem is read-only.
func (fs *webdavFS) Rename(ctx context.Context, oldName, newName string) error {
	return os.ErrPermission
}
// openFile implements webdav.File. Expensive setup work (the cumulative blob
// size index for files, the child listing for directories) is deferred until
// the first Read/Readdir call, see the comments there.
type openFile struct {
	fullpath string
	node     *webdavFSNode
	fs       *webdavFS
	// cursor is the current position: a byte offset for files, an entry
	// index for directory listings.
	cursor int64
	// children caches the materialized directory listing (Readdir).
	children []os.FileInfo
	// cumsize[i] holds the cumulative size of blobs[:i].
	cumsize []uint64
	// initialized is set once children/cumsize have been populated.
	initialized bool
}
// getBlobAt returns the content of the i-th data blob of the file, serving it
// from the shared blob cache when possible and loading (and caching) it from
// the repository otherwise.
func (f *openFile) getBlobAt(ctx context.Context, i int) ([]byte, error) {
	id := f.node.node.Content[i]

	if cached, ok := f.fs.blobCache.Get(id); ok {
		return cached, nil
	}

	loaded, err := f.fs.repo.LoadBlob(ctx, restic.DataBlob, id, nil)
	if err != nil {
		return nil, err
	}
	f.fs.blobCache.Add(id, loaded)

	return loaded, nil
}
// Read implements webdav.File for regular files. On first use it builds the
// cumulative blob size index, then serves the requested byte range by loading
// the overlapping blobs.
func (f *openFile) Read(p []byte) (int, error) {
	debug.Log("Read %s %d %d", f.fullpath, f.cursor, len(p))

	if f.node.IsDir() || f.cursor < 0 {
		return 0, os.ErrInvalid
	}
	if f.cursor >= f.node.Size() {
		return 0, io.EOF
	}

	// We wait until the first read before we do anything because WebDAV clients tend to open
	// everything and do nothing...
	if !f.initialized {
		var bytes uint64
		cumsize := make([]uint64, 1+len(f.node.node.Content))
		for i, id := range f.node.node.Content {
			size, found := f.fs.repo.LookupBlobSize(id, restic.DataBlob)
			if !found {
				return 0, errors.Errorf("id %v not found in repository", id)
			}
			bytes += uint64(size)
			cumsize[i+1] = bytes
		}
		if bytes != f.node.node.Size {
			// the arguments were previously swapped relative to the labels
			Printf("sizes do not match: node.Size %d != size %d\n", f.node.node.Size, bytes)
		}
		f.cumsize = cumsize
		f.initialized = true
	}

	offset := uint64(f.cursor)
	remainingBytes := uint64(len(p))
	readBytes := 0

	// Clamp the read to the remaining file size. The original code subtracted
	// remainingBytes instead of offset here, producing a bogus (or wrapped)
	// remaining count near the end of the file.
	if offset+remainingBytes > uint64(f.node.Size()) {
		remainingBytes = uint64(f.node.Size()) - offset
	}

	// Skip blobs before the offset
	startContent := -1 + sort.Search(len(f.cumsize), func(i int) bool {
		return f.cumsize[i] > offset
	})
	offset -= f.cumsize[startContent]

	for i := startContent; remainingBytes > 0 && i < len(f.cumsize)-1; i++ {
		blob, err := f.getBlobAt(context.TODO(), i)
		if err != nil {
			return 0, err
		}

		if offset > 0 {
			blob = blob[offset:]
			offset = 0
		}
		// never copy more than the remaining range; this also prevents the
		// unsigned remainingBytes from wrapping below zero
		if uint64(len(blob)) > remainingBytes {
			blob = blob[:remainingBytes]
		}

		copied := copy(p, blob)
		remainingBytes -= uint64(copied)
		readBytes += copied
		p = p[copied:]
	}

	f.cursor += int64(readBytes)
	return readBytes, nil
}
// Readdir implements webdav.File for directories. On first use the directory
// content is materialized (loading the snapshot if necessary) and sorted by
// name so listings are deterministic; subsequent calls paginate over it.
func (f *openFile) Readdir(count int) ([]os.FileInfo, error) {
	debug.Log("Readdir %s %d %d", f.fullpath, f.cursor, count)

	if !f.node.IsDir() || f.cursor < 0 {
		return nil, os.ErrInvalid
	}

	// We wait until the first read before we do anything because WebDAV clients tend to open
	// everything and do nothing...
	if !f.initialized {
		// It's a snapshot, mount it
		if f.node.snapshot != nil && f.node.children == nil {
			f.fs.loadSnapshot(context.TODO(), f.fullpath, f.node.snapshot)
		}

		children := make([]os.FileInfo, 0, len(f.node.children))
		for _, c := range f.node.children {
			children = append(children, c)
		}
		// map iteration order is random; sort for stable, repeatable listings
		sort.Slice(children, func(i, j int) bool {
			return children[i].Name() < children[j].Name()
		})
		f.children = children
		f.initialized = true
	}

	if count <= 0 {
		return f.children, nil
	}

	// Paginate over the materialized listing. Using f.node.Size() here is
	// unreliable: the size of implicitly created directories may be stale.
	end := int64(len(f.children))
	if f.cursor >= end {
		return nil, io.EOF
	}

	start := f.cursor
	f.cursor += int64(count)
	if f.cursor > end {
		f.cursor = end
	}
	return f.children[start:f.cursor], nil
}
// Seek implements webdav.File. Per the io.Seeker contract the offset is added
// to the position selected by whence; for io.SeekEnd callers pass a negative
// offset to seek backwards from the end. Seeking before the start is an error.
func (f *openFile) Seek(offset int64, whence int) (int64, error) {
	debug.Log("Seek %s %d %d", f.fullpath, offset, whence)

	var abs int64
	switch whence {
	case io.SeekStart:
		abs = offset
	case io.SeekCurrent:
		abs = f.cursor + offset
	case io.SeekEnd:
		// the original computed Size() - offset, inverting the direction
		abs = f.node.Size() + offset
	default:
		return 0, os.ErrInvalid
	}

	if abs < 0 {
		return 0, os.ErrInvalid
	}
	f.cursor = abs
	return abs, nil
}
// Stat implements webdav.File by returning the node's FileInfo.
func (f *openFile) Stat() (os.FileInfo, error) {
	return f.node, nil
}

// Write implements webdav.File; the filesystem is read-only.
func (f *openFile) Write(p []byte) (int, error) {
	return 0, os.ErrPermission
}

// Close implements webdav.File. No resources are held open, so it is a no-op.
func (f *openFile) Close() error {
	return nil
}

View File

@ -13,6 +13,8 @@ import (
"github.com/peterbourgon/unixtransport"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"golang.org/x/net/http2"
)
// TransportOptions collects various options which can be set for an HTTP based
@ -74,7 +76,6 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) {
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
IdleConnTimeout: 90 * time.Second,
@ -83,6 +84,17 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) {
TLSClientConfig: &tls.Config{},
}
// ensure that http2 connections are closed if they are broken
h2, err := http2.ConfigureTransports(tr)
if err != nil {
panic(err)
}
if feature.Flag.Enabled(feature.HTTPTimeouts) {
h2.WriteByteTimeout = 120 * time.Second
h2.ReadIdleTimeout = 60 * time.Second
h2.PingTimeout = 60 * time.Second
}
unixtransport.Register(tr)
if opts.InsecureTLS {
@ -119,6 +131,11 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) {
tr.TLSClientConfig.RootCAs = pool
}
rt := http.RoundTripper(tr)
if feature.Flag.Enabled(feature.HTTPTimeouts) {
rt = newWatchdogRoundtripper(rt, 120*time.Second, 128*1024)
}
// wrap in the debug round tripper (if active)
return debug.RoundTripper(tr), nil
return debug.RoundTripper(rt), nil
}

View File

@ -0,0 +1,104 @@
package backend
import (
"context"
"io"
"net/http"
"time"
)
// watchdogRoundtripper cancels an http request if an upload or download did not make progress
// within timeout. The time between fully sending the request and receiving a response is also
// limited by this timeout. This ensures that stuck requests are cancelled after some time.
//
// The roundtripper makes the assumption that the upload and download happen continuously. In particular,
// the caller must not make long pauses between individual read requests from the response body.
type watchdogRoundtripper struct {
	rt        http.RoundTripper // wrapped round tripper that performs the actual request
	timeout   time.Duration     // maximum time without progress before the request is canceled
	chunkSize int               // upper bound of bytes handled per Read call, keeps progress checks frequent
}

var _ http.RoundTripper = &watchdogRoundtripper{}

// newWatchdogRoundtripper wraps rt such that requests are canceled when no
// upload or download progress is made within timeout. chunkSize limits how
// many bytes a single Read may transfer between two progress notifications.
func newWatchdogRoundtripper(rt http.RoundTripper, timeout time.Duration, chunkSize int) *watchdogRoundtripper {
	return &watchdogRoundtripper{
		rt:        rt,
		timeout:   timeout,
		chunkSize: chunkSize,
	}
}
// RoundTrip executes the request under a watchdog timer: whenever the request
// body upload or response body download makes progress, the timer is reset.
// If the timer expires, the request context is canceled and the request fails.
func (w *watchdogRoundtripper) RoundTrip(req *http.Request) (*http.Response, error) {
	timer := time.NewTimer(w.timeout)
	ctx, cancel := context.WithCancel(req.Context())

	// cancel context if timer expires
	go func() {
		defer timer.Stop()
		select {
		case <-timer.C:
			cancel()
		case <-ctx.Done():
		}
	}()

	kick := func() {
		timer.Reset(w.timeout)
	}

	req = req.Clone(ctx)
	if req.Body != nil {
		// kick watchdog timer as long as uploading makes progress
		req.Body = newWatchdogReadCloser(req.Body, w.chunkSize, kick, nil)
	}

	resp, err := w.rt.RoundTrip(req)
	if err != nil {
		// Cancel the context so the watchdog goroutine exits right away;
		// without this it lingers until the timer expires.
		cancel()
		return nil, err
	}

	// kick watchdog timer as long as downloading makes progress
	// cancel context to stop goroutine once response body is closed
	resp.Body = newWatchdogReadCloser(resp.Body, w.chunkSize, kick, cancel)
	return resp, nil
}
func newWatchdogReadCloser(rc io.ReadCloser, chunkSize int, kick func(), close func()) *watchdogReadCloser {
return &watchdogReadCloser{
rc: rc,
chunkSize: chunkSize,
kick: kick,
close: close,
}
}
type watchdogReadCloser struct {
rc io.ReadCloser
chunkSize int
kick func()
close func()
}
var _ io.ReadCloser = &watchdogReadCloser{}
func (w *watchdogReadCloser) Read(p []byte) (n int, err error) {
w.kick()
// Read is not required to fill the whole passed in byte slice
// Thus, keep things simple and just stay within our chunkSize.
if len(p) > w.chunkSize {
p = p[:w.chunkSize]
}
n, err = w.rc.Read(p)
w.kick()
return n, err
}
func (w *watchdogReadCloser) Close() error {
if w.close != nil {
w.close()
}
return w.rc.Close()
}

View File

@ -0,0 +1,201 @@
package backend
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
rtest "github.com/restic/restic/internal/test"
)
// TestRead checks that watchdogReadCloser delivers the wrapped data unchanged,
// calls kick around every Read (including the final EOF read) and invokes the
// close callback exactly when Close is called.
func TestRead(t *testing.T) {
	data := []byte("abcdef")
	var ctr int
	kick := func() {
		ctr++
	}
	var closed bool
	onClose := func() {
		closed = true
	}

	wd := newWatchdogReadCloser(io.NopCloser(bytes.NewReader(data)), 1, kick, onClose)
	out, err := io.ReadAll(wd)
	rtest.OK(t, err)
	rtest.Equals(t, data, out, "data mismatch")
	// the EOF read also triggers the kick function
	rtest.Equals(t, len(data)*2+2, ctr, "unexpected number of kick calls")
	rtest.Equals(t, false, closed, "close function called too early")

	rtest.OK(t, wd.Close())
	rtest.Equals(t, true, closed, "close function not called")
}
// TestRoundtrip exercises the full request/response cycle through the
// watchdogRoundtripper with bodies that are transferred slowly, at varying
// per-chunk delays, and verifies the response arrives intact.
func TestRoundtrip(t *testing.T) {
	t.Parallel()

	// at the higher delay values, it takes longer to transmit the request/response body
	// than the roundTripper timeout
	for _, delay := range []int{0, 1, 10, 20} {
		t.Run(fmt.Sprintf("%v", delay), func(t *testing.T) {
			msg := []byte("ping-pong-data")
			srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				// echo the request body back to the client
				data, err := io.ReadAll(r.Body)
				if err != nil {
					w.WriteHeader(500)
					return
				}
				w.WriteHeader(200)

				// slowly send the reply
				for len(data) >= 2 {
					_, _ = w.Write(data[:2])
					w.(http.Flusher).Flush()
					data = data[2:]
					time.Sleep(time.Duration(delay) * time.Millisecond)
				}
				_, _ = w.Write(data)
			}))
			defer srv.Close()

			rt := newWatchdogRoundtripper(http.DefaultTransport, 50*time.Millisecond, 2)
			req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), time.Duration(delay)*time.Millisecond)))
			rtest.OK(t, err)

			resp, err := rt.RoundTrip(req)
			rtest.OK(t, err)
			rtest.Equals(t, 200, resp.StatusCode, "unexpected status code")

			response, err := io.ReadAll(resp.Body)
			rtest.OK(t, err)
			rtest.Equals(t, msg, response, "unexpected response")

			rtest.OK(t, resp.Body.Close())
		})
	}
}
// TestCanceledRoundtrip verifies that a request whose context is already
// canceled fails with context.Canceled instead of being attempted.
func TestCanceledRoundtrip(t *testing.T) {
	rt := newWatchdogRoundtripper(http.DefaultTransport, time.Second, 2)
	ctx, cancel := context.WithCancel(context.Background())
	// cancel before the request is even started
	cancel()

	req, err := http.NewRequestWithContext(ctx, "GET", "http://some.random.url.dfdgsfg", nil)
	rtest.OK(t, err)

	resp, err := rt.RoundTrip(req)
	rtest.Equals(t, context.Canceled, err)
	// make linter happy
	if resp != nil {
		rtest.OK(t, resp.Body.Close())
	}
}
type slowReader struct {
data io.Reader
delay time.Duration
}
func newSlowReader(data io.Reader, delay time.Duration) *slowReader {
return &slowReader{
data: data,
delay: delay,
}
}
func (s *slowReader) Read(p []byte) (n int, err error) {
time.Sleep(s.delay)
return s.data.Read(p)
}
// TestUploadTimeout verifies that a request body which uploads too slowly
// (slower than the watchdog timeout allows) gets canceled before the server
// ever sees the complete request.
func TestUploadTimeout(t *testing.T) {
	t.Parallel()

	msg := []byte("ping")
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := io.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(500)
			return
		}
		// reaching this point means the upload was not interrupted
		t.Error("upload should have been canceled")
	}))
	defer srv.Close()

	// the slow reader takes far longer per Read than the 10ms watchdog timeout
	rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024)
	req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(newSlowReader(bytes.NewReader(msg), 100*time.Millisecond)))
	rtest.OK(t, err)

	resp, err := rt.RoundTrip(req)
	rtest.Equals(t, context.Canceled, err)
	// make linter happy
	if resp != nil {
		rtest.OK(t, resp.Body.Close())
	}
}
// TestProcessingTimeout verifies that the watchdog also covers the gap between
// fully sending the request and receiving the response headers: a server that
// sits on the request too long causes the request to be canceled.
func TestProcessingTimeout(t *testing.T) {
	t.Parallel()

	msg := []byte("ping")
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := io.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(500)
			return
		}
		// delay the response beyond the watchdog timeout
		time.Sleep(100 * time.Millisecond)
		w.WriteHeader(200)
	}))
	defer srv.Close()

	rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024)
	req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg)))
	rtest.OK(t, err)

	resp, err := rt.RoundTrip(req)
	rtest.Equals(t, context.Canceled, err)
	// make linter happy
	if resp != nil {
		rtest.OK(t, resp.Body.Close())
	}
}
// TestDownloadTimeout verifies that a response body which stalls mid-download
// for longer than the watchdog timeout is canceled while being read, even
// though the response headers arrived in time.
func TestDownloadTimeout(t *testing.T) {
	t.Parallel()

	msg := []byte("ping")
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		data, err := io.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(500)
			return
		}
		// send the first bytes immediately, then stall beyond the timeout
		w.WriteHeader(200)
		_, _ = w.Write(data[:2])
		w.(http.Flusher).Flush()
		data = data[2:]
		time.Sleep(100 * time.Millisecond)
		_, _ = w.Write(data)
	}))
	defer srv.Close()

	rt := newWatchdogRoundtripper(http.DefaultTransport, 10*time.Millisecond, 1024)
	req, err := http.NewRequestWithContext(context.TODO(), "GET", srv.URL, io.NopCloser(bytes.NewReader(msg)))
	rtest.OK(t, err)

	resp, err := rt.RoundTrip(req)
	rtest.OK(t, err)
	rtest.Equals(t, 200, resp.StatusCode, "unexpected status code")

	// the headers arrived, but reading the body must run into the watchdog
	_, err = io.ReadAll(resp.Body)
	rtest.Equals(t, context.Canceled, err, "response download not canceled")
	rtest.OK(t, resp.Body.Close())
}

View File

@ -8,6 +8,7 @@ const (
DeprecateLegacyIndex FlagName = "deprecate-legacy-index"
DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout"
DeviceIDForHardlinks FlagName = "device-id-for-hardlinks"
HTTPTimeouts FlagName = "http-timeouts"
)
func init() {
@ -15,5 +16,6 @@ func init() {
DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."},
DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."},
DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"},
HTTPTimeouts: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests."},
})
}