Merge pull request #1623 from restic/backend-relax-restrictions

backend: Relax requirement for new files
This commit is contained in:
Alexander Neumann 2018-02-18 12:56:52 +01:00
commit 29da86b473
7 changed files with 13 additions and 48 deletions

12
changelog/0.8.3/pull-1623 Normal file
View file

@ -0,0 +1,12 @@
Enhancement: Don't check for presence of files in the backend before writing
Before, all backend implementations were required to return an error if the
file that is to be written already exists in the backend. For most backends,
that means making a request (e.g. via HTTP) and returning an error when the
file already exists.
This is not accurate: the file could have been created between the HTTP request
testing for it and the start of the write, so we've relaxed this requirement,
which saves one additional HTTP request per newly added file.
https://github.com/restic/restic/pull/1623

View file

@ -135,16 +135,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
debug.Log("Save %v at %v", h, objName)
// Check key does not already exist
found, err := be.container.GetBlobReference(objName).Exists()
if err != nil {
return errors.Wrap(err, "GetBlobReference().Exists()")
}
if found {
debug.Log("%v already exists", h)
return errors.New("key already exists")
}
be.sem.GetToken()
// wrap the reader so that net/http client cannot close the reader, return

View file

@ -200,12 +200,6 @@ func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) er
debug.Log("Save %v, name %v", h, name)
obj := be.bucket.Object(name)
_, err := obj.Attrs(ctx)
if err == nil {
debug.Log(" %v already exists", h)
return errors.New("key already exists")
}
w := obj.NewWriter(ctx)
n, err := io.Copy(w, rd)
debug.Log(" saved %d bytes, err %v", n, err)

View file

@ -218,13 +218,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
be.sem.GetToken()
// Check key does not already exist
if _, err := be.service.Objects.Get(be.bucketName, objName).Do(); err == nil {
debug.Log("%v already exists", h)
be.sem.ReleaseToken()
return errors.New("key already exists")
}
debug.Log("InsertObject(%v, %v)", be.bucketName, objName)
// Set chunk size to zero to disable resumable uploads.

View file

@ -235,13 +235,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
be.sem.GetToken()
defer be.sem.ReleaseToken()
// Check key does not already exist
_, err = be.client.StatObject(be.cfg.Bucket, objName, minio.StatObjectOptions{})
if err == nil {
debug.Log("%v already exists", h)
return errors.New("key already exists")
}
var size int64 = -1
type lenner interface {

View file

@ -168,19 +168,6 @@ func (be *beSwift) Save(ctx context.Context, h restic.Handle, rd io.Reader) (err
be.sem.GetToken()
defer be.sem.ReleaseToken()
// Check key does not already exist
switch _, _, err = be.conn.Object(be.container, objName); err {
case nil:
debug.Log("%v already exists", h)
return errors.New("key already exists")
case swift.ObjectNotFound:
// Ok, that's what we want
default:
return errors.Wrap(err, "conn.Object")
}
encoding := "binary/octet-stream"
debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding)

View file

@ -764,14 +764,10 @@ func (s *Suite) TestBackend(t *testing.T) {
// test adding the first file again
ts := testStrings[0]
// create blob
h := restic.Handle{Type: tpe, Name: ts.id}
err := b.Save(context.TODO(), h, strings.NewReader(ts.data))
test.Assert(t, err != nil, "backend has allowed overwrite of existing blob: expected error for %v, got %v", h, err)
// remove and recreate
err = s.delayedRemove(t, b, h)
err := s.delayedRemove(t, b, h)
test.OK(t, err)
// test that the blob is gone