s3: add backend restore command to restore objects from GLACIER
See: https://forum.rclone.org/t/rclone-settier-fails-with-scaleway-entitytoolarge/17384
This commit is contained in:
parent 49a7d08a40
commit fd7c63bc78

1 changed file with 123 additions and 0 deletions

backend/s3/s3.go (+123)
@@ -52,6 +52,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/bucket"
@@ -70,6 +71,7 @@ func init() {
 		Name:        "s3",
 		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
 		NewFs:       NewFs,
+		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     fs.ConfigProvider,
 			Help:     "Choose your S3 provider.",
@@ -2040,6 +2042,126 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	return httpReq.Presign(time.Duration(expire))
 }
 
+var commandHelp = []fs.CommandHelp{{
+	Name:  "restore",
+	Short: "Restore objects from GLACIER to normal storage",
+	Long: `This command can be used to restore one or more objects from GLACIER
+to normal storage.
+
+Usage Examples:
+
+    rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS]
+    rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
+    rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
+
+This command also obeys the filters. Test first with the -i/--interactive or --dry-run flags
+
+    rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard
+
+All the objects shown will be marked for restore, then
+
+    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
+
+It returns a list of status dictionaries with Remote and Status
+keys. The Status will be OK if it was successful or an error message
+if not.
+
+    [
+        {
+            "Status": "OK",
+            "Remote": "test.txt"
+        },
+        {
+            "Status": "OK",
+            "Remote": "test/file4.txt"
+        }
+    ]
+
+`,
+	Opts: map[string]string{
+		"priority":    "Priority of restore: Standard|Expedited|Bulk",
+		"lifetime":    "Lifetime of the active copy in days",
+		"description": "The optional description for the job.",
+	},
+}}
+
|
||||||
|
//
|
||||||
|
// The command run is name
|
||||||
|
// args may be used to read arguments from
|
||||||
|
// opts may be used to read optional arguments from
|
||||||
|
//
|
||||||
|
// The result should be capable of being JSON encoded
|
||||||
|
// If it is a string or a []string it will be shown to the user
|
||||||
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||||
|
switch name {
|
||||||
|
case "restore":
|
||||||
|
req := s3.RestoreObjectInput{
|
||||||
|
//Bucket: &f.rootBucket,
|
||||||
|
//Key: &encodedDirectory,
|
||||||
|
RestoreRequest: &s3.RestoreRequest{},
|
||||||
|
}
|
||||||
|
if lifetime := opt["lifetime"]; lifetime != "" {
|
||||||
|
ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "bad lifetime")
|
||||||
|
}
|
||||||
|
req.RestoreRequest.Days = &ilifetime
|
||||||
|
}
|
||||||
|
if priority := opt["priority"]; priority != "" {
|
||||||
|
req.RestoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{
|
||||||
|
Tier: &priority,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if description := opt["description"]; description != "" {
|
||||||
|
req.RestoreRequest.Description = &description
|
||||||
|
}
|
||||||
|
type status struct {
|
||||||
|
Status string
|
||||||
|
Remote string
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
outMu sync.Mutex
|
||||||
|
out = []status{}
|
||||||
|
)
|
||||||
|
err = operations.ListFn(ctx, f, func(obj fs.Object) {
|
||||||
|
// Remember this is run --checkers times concurrently
|
||||||
|
o, ok := obj.(*Object)
|
||||||
|
st := status{Status: "OK", Remote: obj.Remote()}
|
||||||
|
defer func() {
|
||||||
|
outMu.Lock()
|
||||||
|
out = append(out, st)
|
||||||
|
outMu.Unlock()
|
||||||
|
}()
|
||||||
|
if operations.SkipDestructive(ctx, obj, "restore") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
st.Status = "Not an S3 object"
|
||||||
|
return
|
||||||
|
}
|
||||||
|
bucket, bucketPath := o.split()
|
||||||
|
reqCopy := req
|
||||||
|
reqCopy.Bucket = &bucket
|
||||||
|
reqCopy.Key = &bucketPath
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
_, err = f.c.RestoreObject(&reqCopy)
|
||||||
|
return f.shouldRetry(err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
st.Status = err.Error()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
default:
|
||||||
|
return nil, fs.ErrorCommandNotFound
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// Fs returns the parent Fs
|
// Fs returns the parent Fs
|
||||||
|
@@ -2657,6 +2779,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.Commander   = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 	_ fs.GetTierer   = &Object{}
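Because the commit wires the new command through rclone's generic backend-command mechanism (the `fs.Commander` assertion above), it can also be driven from Go code, not just from `rclone backend restore`. The sketch below is illustrative only: the package and the `restoreAll` helper are hypothetical, it assumes an s3 `fs.Fs` has already been constructed elsewhere, and the priority/lifetime values are example choices.

package restoreexample

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// restoreAll is a hypothetical helper: given an already-constructed s3
// backend, it marks everything under it for restore from GLACIER,
// mirroring `rclone backend restore remote: -o priority=Standard -o lifetime=30`.
func restoreAll(ctx context.Context, f fs.Fs) error {
	commander, ok := f.(fs.Commander)
	if !ok {
		return errors.New("backend does not support backend commands")
	}
	out, err := commander.Command(ctx, "restore", nil, map[string]string{
		"priority": "Standard", // Standard|Expedited|Bulk
		"lifetime": "30",       // days the restored copy stays available
	})
	if err != nil {
		return err
	}
	// out holds the slice of {Status, Remote} results built in Command;
	// render it as the JSON shape documented in the command help.
	b, err := json.MarshalIndent(out, "", "    ")
	if err != nil {
		return err
	}
	fmt.Println(string(b))
	return nil
}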