package s3

import (
	"bytes"
	"errors"
	"io"
	"strings"

	"github.com/minio/minio-go"

	"github.com/restic/restic/backend"
	"github.com/restic/restic/debug"
)

const maxKeysInList = 1000
const connLimit = 10           // maximum number of concurrent requests to the S3 endpoint
const backendPrefix = "restic" // prefix under which all objects are stored in the bucket
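
// s3path returns the name of the S3 object for a blob of type t with the
// given name. All objects live below backendPrefix; the config file is
// special-cased and stored without a name component. For example, assuming
// backend.Snapshot stringifies to "snapshot", s3path(backend.Snapshot, "1234")
// yields "restic/snapshot/1234", while s3path(backend.Config, "") yields
// "restic/config".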
func s3path(t backend.Type, name string) string {
	if t == backend.Config {
		return backendPrefix + "/" + string(t)
	}
	return backendPrefix + "/" + string(t) + "/" + name
}

// S3Backend is a backend that stores its data in a bucket on an S3-compatible
// server.
type S3Backend struct {
	client     minio.CloudStorageClient
	connChan   chan struct{}
	bucketname string
}

// Open opens the S3 backend at the bucket and endpoint given in cfg. The
// bucket is created if it does not exist yet.
func Open(cfg Config) (backend.Backend, error) {
	debug.Log("s3.Open", "open, config %#v", cfg)

	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, cfg.UseHTTP)
	if err != nil {
		return nil, err
	}

	be := &S3Backend{client: client, bucketname: cfg.Bucket}
	be.createConnections()

	if err := client.BucketExists(cfg.Bucket); err != nil {
		debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)

		// create a new bucket with the default ACL in the default region
		err = client.MakeBucket(cfg.Bucket, "", "")
		if err != nil {
			return nil, err
		}
	}

	return be, nil
}
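
// A minimal usage sketch, assuming a Config filled with the fields used
// above (the concrete Config type is defined elsewhere in this package, and
// the endpoint, credentials and bucket name are placeholders):
//
//	cfg := Config{
//		Endpoint: "s3.amazonaws.com",
//		KeyID:    "<key id>",
//		Secret:   "<secret>",
//		Bucket:   "restic",
//	}
//
//	be, err := Open(cfg)
//	if err != nil {
//		// handle error
//	}
//	defer be.Close()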

// createConnections fills connChan with connLimit tokens. The channel acts as
// a counting semaphore that limits the number of concurrent requests.
func (be *S3Backend) createConnections() {
	be.connChan = make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		be.connChan <- struct{}{}
	}
}
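
// Every request to the S3 endpoint first takes a token from connChan and puts
// it back when done, so at most connLimit requests are in flight at any time.
// The pattern used throughout this file is a sketch of the form:
//
//	<-be.connChan // acquire a token, blocks while connLimit requests are running
//	defer func() {
//		be.connChan <- struct{}{} // release the token
//	}()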

// Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string {
	return be.bucketname
}

// s3Blob buffers written data in memory; the buffered data is uploaded to the
// bucket when Finalize is called.
type s3Blob struct {
	b     *S3Backend
	buf   *bytes.Buffer
	final bool
}

func (bb *s3Blob) Write(p []byte) (int, error) {
	if bb.final {
		return 0, errors.New("blob already closed")
	}

	return bb.buf.Write(p)
}

func (bb *s3Blob) Read(p []byte) (int, error) {
	return bb.buf.Read(p)
}

func (bb *s3Blob) Close() error {
	bb.final = true
	bb.buf.Reset()
	return nil
}

func (bb *s3Blob) Size() uint {
	return uint(bb.buf.Len())
}

func (bb *s3Blob) Finalize(t backend.Type, name string) error {
	debug.Log("s3.blob.Finalize", "bucket %v, finalize %v, %d bytes", bb.b.bucketname, name, bb.buf.Len())
	if bb.final {
		return errors.New("already finalized")
	}

	bb.final = true

	path := s3path(t, name)

	// check that the key does not exist yet
	_, err := bb.b.client.StatObject(bb.b.bucketname, path)
	if err == nil {
		debug.Log("s3.blob.Finalize", "%v already exists", name)
		return errors.New("key already exists")
	}

	expectedBytes := bb.buf.Len()

	<-bb.b.connChan
	debug.Log("s3.blob.Finalize", "PutObject(%v, %v, %v, %v)",
		bb.b.bucketname, path, int64(bb.buf.Len()), "binary/octet-stream")
	n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, "binary/octet-stream")
	debug.Log("s3.blob.Finalize", "finalized %v -> n %v, err %#v", path, n, err)
	bb.b.connChan <- struct{}{}

	if err != nil {
		return err
	}

	if n != int64(expectedBytes) {
		return errors.New("could not store all bytes")
	}

	return nil
}

// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (be *S3Backend) Create() (backend.Blob, error) {
	blob := s3Blob{
		b:   be,
		buf: &bytes.Buffer{},
	}

	return &blob, nil
}
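
// A minimal write sketch using the blob returned by Create (error handling
// elided; the type, name and data are placeholders):
//
//	blob, _ := be.Create()
//	_, _ = blob.Write(data)
//	_ = blob.Finalize(backend.Data, name)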

// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (be S3Backend) Load(h backend.Handle, p []byte, off int64) (int, error) {
	debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
	path := s3path(h.Type, h.Name)
	obj, err := be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("s3.Load", "GetObject() err %v", err)
		return 0, err
	}

	if off > 0 {
		// seek relative to the start of the object
		_, err = obj.Seek(off, 0)
		if err != nil {
			return 0, err
		}
	}

	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	return io.ReadFull(obj, p)
}
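
// Note that Load, like io.ReaderAt, treats a short read as an error:
// io.ReadFull reports io.ErrUnexpectedEOF (or io.EOF for an empty read) when
// the object holds fewer than len(p) bytes past the offset.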

// Save stores data in the backend at the handle.
func (be S3Backend) Save(h backend.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("s3.Save", "%d bytes at %v", len(p), h)

	path := s3path(h.Type, h.Name)

	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)

	return err
}
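
// A minimal save/load round trip with the Handle-based API (the Handle fields
// follow the usage in Save and Load above; id and data are placeholders):
//
//	h := backend.Handle{Type: backend.Data, Name: id}
//	if err := be.Save(h, data); err != nil {
//		// handle error
//	}
//
//	buf := make([]byte, len(data))
//	_, err := be.Load(h, buf, 0)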

// Stat returns information about a blob.
func (be S3Backend) Stat(h backend.Handle) (backend.BlobInfo, error) {
	debug.Log("s3.Stat", "%v", h)
	path := s3path(h.Type, h.Name)
	obj, err := be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("s3.Stat", "GetObject() err %v", err)
		return backend.BlobInfo{}, err
	}

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("s3.Stat", "Stat() err %v", err)
		return backend.BlobInfo{}, err
	}

	return backend.BlobInfo{Size: fi.Size}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
	found := false
	path := s3path(t, name)
	_, err := be.client.StatObject(be.bucketname, path)
	if err == nil {
		found = true
	}

	// any error from StatObject is interpreted as "not found"
	return found, nil
}

// Remove removes the blob with the given name and type.
func (be *S3Backend) Remove(t backend.Type, name string) error {
	path := s3path(t, name)
	err := be.client.RemoveObject(be.bucketname, path)
	debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
	return err
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
	debug.Log("s3.List", "listing %v", t)
	ch := make(chan string)

	prefix := s3path(t, "")

	listresp := be.client.ListObjects(be.bucketname, prefix, true, done)

	go func() {
		defer close(ch)
		for obj := range listresp {
			m := strings.TrimPrefix(obj.Key, prefix)
			if m == "" {
				continue
			}

			select {
			case ch <- m:
			case <-done:
				return
			}
		}
	}()

	return ch
}
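
// A minimal listing sketch, mirroring the use of List in removeKeys below:
//
//	done := make(chan struct{})
//	defer close(done)
//	for name := range be.List(backend.Snapshot, done) {
//		// process name
//	}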

// removeKeys deletes all blobs of the given backend type.
func (be *S3Backend) removeKeys(t backend.Type) error {
	done := make(chan struct{})
	defer close(done)
	for key := range be.List(t, done) {
		err := be.Remove(t, key)
		if err != nil {
			return err
		}
	}

	return nil
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *S3Backend) Delete() error {
	alltypes := []backend.Type{
		backend.Data,
		backend.Key,
		backend.Lock,
		backend.Snapshot,
		backend.Index}

	for _, t := range alltypes {
		err := be.removeKeys(t)
		if err != nil {
			return err
		}
	}

	return be.Remove(backend.Config, "")
}

// Close does nothing.
func (be *S3Backend) Close() error { return nil }