Cache size of last ReadAt on S3 for performance

Each obj.Stat() call adds another request to the S3 endpoint, and
for some commands many ReadAt calls are made for the same object
in S3. This patch essentially cuts the number of calls to S3 in
half for this case, speeding things up and lowering S3 costs.
trbs 2017-03-14 13:51:34 +01:00
parent bf30b2831b
commit b523eef294

@@ -20,10 +20,12 @@ const connLimit = 40
 // s3 is a backend which stores the data on an S3 endpoint.
 type s3 struct {
-	client     *minio.Client
-	connChan   chan struct{}
-	bucketname string
-	prefix     string
+	client       *minio.Client
+	connChan     chan struct{}
+	bucketname   string
+	prefix       string
+	cacheObjName string
+	cacheSize    int64
 }

 // Open opens the S3 backend at bucket and region. The bucket is created if it
 // does not exist yet.
@@ -139,6 +141,7 @@ func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
 	}

 	var obj *minio.Object
+	var size int64

 	objName := be.s3path(h)
@@ -186,20 +189,27 @@ func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
 	}()

 	// otherwise use a buffer with ReadAt
-	info, err := obj.Stat()
-	if err != nil {
-		_ = obj.Close()
-		return nil, errors.Wrap(err, "obj.Stat")
+	if be.cacheObjName == objName {
+		size = be.cacheSize
+	} else {
+		info, err := obj.Stat()
+		if err != nil {
+			_ = obj.Close()
+			return nil, errors.Wrap(err, "obj.Stat")
+		}
+		size = info.Size
+		be.cacheObjName = objName
+		be.cacheSize = size
 	}

-	if offset > info.Size {
+	if offset > size {
 		_ = obj.Close()
 		return nil, errors.New("offset larger than file size")
 	}

 	l := int64(length)
-	if offset+l > info.Size {
-		l = info.Size - offset
+	if offset+l > size {
+		l = size - offset
 	}

 	buf := make([]byte, l)
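
For readers who want to see the technique in isolation, below is a minimal, self-contained Go sketch of the single-entry size cache this commit adds. The backend type, the statObject helper, and the fake sizes are hypothetical stand-ins (statObject plays the role of the HEAD round trip that minio's obj.Stat() makes against the S3 endpoint); only the cacheObjName/cacheSize memoization mirrors the patch itself.

package main

import "fmt"

// backend mirrors the two cache fields this commit adds to the s3 struct.
// statCalls is a hypothetical counter standing in for real S3 round trips.
type backend struct {
	cacheObjName string
	cacheSize    int64
	statCalls    int
}

// statObject is a hypothetical stand-in for the request that
// obj.Stat() would perform against the S3 endpoint.
func (be *backend) statObject(name string) int64 {
	be.statCalls++
	return int64(len(name)) * 1024 // fake size, for demonstration only
}

// objectSize consults the single-entry cache first, so repeated reads
// of the same object pay for only one stat round trip.
func (be *backend) objectSize(name string) int64 {
	if be.cacheObjName == name {
		return be.cacheSize
	}
	size := be.statObject(name)
	be.cacheObjName = name
	be.cacheSize = size
	return size
}

func main() {
	be := &backend{}
	// Many Load/ReadAt calls for the same object, as described in the
	// commit message: only the first call performs a stat.
	for i := 0; i < 5; i++ {
		_ = be.objectSize("data/aa/bbccdd")
	}
	fmt.Println("stat calls:", be.statCalls) // prints: stat calls: 1
}

Note that the cache holds exactly one entry, keyed by object name, so it helps precisely in the case the commit message describes: many ReadAt calls in a row against the same object. A workload alternating between two objects would still stat on every Load.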