Tardigrade Backend
This commit is contained in:
parent
03b629064a
commit
0ce662faad
13 changed files with 1227 additions and 3 deletions
|
@ -63,6 +63,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
||||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
|
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
||||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||||
|
|
|
@ -36,6 +36,7 @@ import (
|
||||||
_ "github.com/rclone/rclone/backend/sharefile"
|
_ "github.com/rclone/rclone/backend/sharefile"
|
||||||
_ "github.com/rclone/rclone/backend/sugarsync"
|
_ "github.com/rclone/rclone/backend/sugarsync"
|
||||||
_ "github.com/rclone/rclone/backend/swift"
|
_ "github.com/rclone/rclone/backend/swift"
|
||||||
|
_ "github.com/rclone/rclone/backend/tardigrade"
|
||||||
_ "github.com/rclone/rclone/backend/union"
|
_ "github.com/rclone/rclone/backend/union"
|
||||||
_ "github.com/rclone/rclone/backend/webdav"
|
_ "github.com/rclone/rclone/backend/webdav"
|
||||||
_ "github.com/rclone/rclone/backend/yandex"
|
_ "github.com/rclone/rclone/backend/yandex"
|
||||||
|
|
684
backend/tardigrade/fs.go
Normal file
684
backend/tardigrade/fs.go
Normal file
|
@ -0,0 +1,684 @@
|
||||||
|
// +build go1.13,!plan9
|
||||||
|
|
||||||
|
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
|
||||||
|
package tardigrade
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
|
"golang.org/x/text/unicode/norm"
|
||||||
|
|
||||||
|
"storj.io/uplink"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// existingProvider reuses an already-serialized access grant.
	existingProvider = "existing"
	// newProvider derives a grant from satellite address, API key, and passphrase.
	newProvider = "new"
)

// satMap maps the short satellite host names offered in the config examples
// to their full <nodeid>@<address>:<port> form expected by libuplink.
var satMap = map[string]string{
	"us-central-1.tardigrade.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
	"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
	"asia-east-1.tardigrade.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
|
||||||
|
|
||||||
|
// Register with Fs
|
||||||
|
func init() {
|
||||||
|
fs.Register(&fs.RegInfo{
|
||||||
|
Name: "tardigrade",
|
||||||
|
Description: "Tardigrade Decentralized Cloud Storage",
|
||||||
|
NewFs: NewFs,
|
||||||
|
Config: func(name string, configMapper configmap.Mapper) {
|
||||||
|
provider, _ := configMapper.Get(fs.ConfigProvider)
|
||||||
|
|
||||||
|
config.FileDeleteKey(name, fs.ConfigProvider)
|
||||||
|
|
||||||
|
if provider == newProvider {
|
||||||
|
satelliteString, _ := configMapper.Get("satellite_address")
|
||||||
|
apiKey, _ := configMapper.Get("api_key")
|
||||||
|
passphrase, _ := configMapper.Get("passphrase")
|
||||||
|
|
||||||
|
// satelliteString contains always default and passphrase can be empty
|
||||||
|
if apiKey == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
satellite, found := satMap[satelliteString]
|
||||||
|
if !found {
|
||||||
|
satellite = satelliteString
|
||||||
|
}
|
||||||
|
|
||||||
|
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Couldn't create access grant: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
serialziedAccess, err := access.Serialize()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Couldn't serialize access grant: %v", err)
|
||||||
|
}
|
||||||
|
configMapper.Set("satellite_address", satellite)
|
||||||
|
configMapper.Set("access_grant", serialziedAccess)
|
||||||
|
} else if provider == existingProvider {
|
||||||
|
config.FileDeleteKey(name, "satellite_address")
|
||||||
|
config.FileDeleteKey(name, "api_key")
|
||||||
|
config.FileDeleteKey(name, "passphrase")
|
||||||
|
} else {
|
||||||
|
log.Fatalf("Invalid provider type: %s", provider)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Options: []fs.Option{
|
||||||
|
{
|
||||||
|
Name: fs.ConfigProvider,
|
||||||
|
Help: "Choose an authentication method.",
|
||||||
|
Required: true,
|
||||||
|
Default: existingProvider,
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "existing",
|
||||||
|
Help: "Use an existing access grant.",
|
||||||
|
}, {
|
||||||
|
Value: newProvider,
|
||||||
|
Help: "Create a new access grant from satellite address, API key, and passphrase.",
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
{
|
||||||
|
Name: "access_grant",
|
||||||
|
Help: "Access Grant.",
|
||||||
|
Required: false,
|
||||||
|
Provider: "existing",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "satellite_address",
|
||||||
|
Help: "Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.",
|
||||||
|
Required: false,
|
||||||
|
Provider: newProvider,
|
||||||
|
Default: "us-central-1.tardigrade.io",
|
||||||
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "us-central-1.tardigrade.io",
|
||||||
|
Help: "US Central 1",
|
||||||
|
}, {
|
||||||
|
Value: "europe-west-1.tardigrade.io",
|
||||||
|
Help: "Europe West 1",
|
||||||
|
}, {
|
||||||
|
Value: "asia-east-1.tardigrade.io",
|
||||||
|
Help: "Asia East 1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "api_key",
|
||||||
|
Help: "API Key.",
|
||||||
|
Required: false,
|
||||||
|
Provider: newProvider,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "passphrase",
|
||||||
|
Help: "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
|
||||||
|
Required: false,
|
||||||
|
Provider: newProvider,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
type Options struct {
	// Access is a serialized access grant (used with the "existing" provider).
	Access string `config:"access_grant"`

	// The three fields below are only consulted when no access grant is
	// configured; NewFs derives (and persists) a grant from them.
	SatelliteAddress string `config:"satellite_address"`
	APIKey           string `config:"api_key"`
	Passphrase       string `config:"passphrase"`
}
|
||||||
|
|
||||||
|
// Fs represents a remote to Tardigrade
type Fs struct {
	name string // the name of the remote
	root string // root of the filesystem, NFC-normalized with slashes trimmed

	opts     Options      // parsed options
	features *fs.Features // optional features

	access *uplink.Access // parsed scope

	project *uplink.Project // project client (opened in NewFs via connect)
}

// Check the interfaces are satisfied.
var (
	_ fs.Fs          = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.PutStreamer = &Fs{}
)
|
||||||
|
|
||||||
|
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
	ctx := context.Background()

	// Setup filesystem and connection to Tardigrade
	root = norm.NFC.String(root)
	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
		root: root,
	}

	// Parse config into Options struct
	err = configstruct.Set(m, &f.opts)
	if err != nil {
		return nil, err
	}

	// Parse access. A configured serialized grant takes precedence over
	// deriving one from satellite/key/passphrase.
	var access *uplink.Access

	if f.opts.Access != "" {
		access, err = uplink.ParseAccess(f.opts.Access)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}
	}

	if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
		access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}

		serializedAccess, err := access.Serialize()
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}

		// Persist the derived grant so subsequent runs skip the
		// derivation round trip.
		err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}
	}

	if access == nil {
		return nil, errors.New("access not found")
	}

	f.access = access

	f.features = (&fs.Features{
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)

	project, err := f.connect(ctx)
	if err != nil {
		return nil, err
	}
	f.project = project

	// Root validation needs to check the following: If a bucket path is
	// specified and exists, then the object must be a directory.
	//
	// NOTE: At this point this must return the filesystem object we've
	// created so far even if there is an error.
	if root != "" {
		bucketName, bucketPath := bucket.Split(root)

		if bucketName != "" && bucketPath != "" {
			_, err = project.StatBucket(ctx, bucketName)
			if err != nil {
				return f, errors.Wrap(err, "tardigrade: bucket")
			}

			object, err := project.StatObject(ctx, bucketName, bucketPath)
			if err == nil {
				if !object.IsPrefix {
					// If the root is actually a file we
					// need to return the *parent*
					// directory of the root instead and an
					// error that the original root
					// requested is a file.
					newRoot := path.Dir(f.root)
					if newRoot == "." {
						newRoot = ""
					}
					f.root = newRoot

					return f, fs.ErrorIsFile
				}
			}
		}
	}

	return f, nil
}
|
||||||
|
|
||||||
|
// connect opens a connection to Tardigrade.
|
||||||
|
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
|
||||||
|
fs.Debugf(f, "connecting...")
|
||||||
|
defer fs.Debugf(f, "connected: %+v", err)
|
||||||
|
|
||||||
|
cfg := uplink.Config{}
|
||||||
|
|
||||||
|
project, err = cfg.OpenProject(ctx, f.access)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "tardigrade: project")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// absolute computes the absolute bucket name and path from the filesystem root
|
||||||
|
// and the relative path provided.
|
||||||
|
func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
|
||||||
|
bn, bp := bucket.Split(path.Join(f.root, relative))
|
||||||
|
|
||||||
|
// NOTE: Technically libuplink does not care about the encoding. It is
|
||||||
|
// happy to work with them as opaque byte sequences. However, rclone
|
||||||
|
// has a test that requires two paths with the same normalized form
|
||||||
|
// (but different un-normalized forms) to point to the same file. This
|
||||||
|
// means we have to normalize before we interact with libuplink.
|
||||||
|
return norm.NFC.String(bn), norm.NFC.String(bp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	// root has already been NFC-normalized and slash-trimmed by NewFs.
	return f.root
}
|
||||||
|
|
||||||
|
// String returns a description of the FS using the sj:// URI scheme.
func (f *Fs) String() string {
	return fmt.Sprintf("FS sj://%s", f.root)
}
|
||||||
|
|
||||||
|
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	// Modification times are stored as RFC3339Nano metadata (see Put), so
	// nanosecond precision is preserved.
	return time.Nanosecond
}
|
||||||
|
|
||||||
|
// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
	// No hash types are supported — returns an empty set.
	return hash.NewHashSet()
}
|
||||||
|
|
||||||
|
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||||
|
|
||||||
|
// List the objects and directories in relative into entries. The entries can
// be returned in any order but should be for a complete directory.
//
// relative should be "" to list the root, and should not have trailing
// slashes.
//
// This should return fs.ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "ls ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	// Translate libuplink's bucket-not-found into rclone's dir-not-found
	// for whichever return path produced it.
	defer func() {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			err = fs.ErrorDirNotFound
		}
	}()

	if bucketName == "" {
		// A path without a bucket is only valid when listing the root
		// (i.e. listing the buckets themselves).
		if bucketPath != "" {
			return nil, fs.ErrorListBucketRequired
		}

		return f.listBuckets(ctx)
	}

	return f.listObjects(ctx, relative, bucketName, bucketPath)
}
|
||||||
|
|
||||||
|
// listBuckets returns one directory entry per bucket in the project.
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "BKT ls")

	buckets := f.project.ListBuckets(ctx, nil)

	for buckets.Next() {
		bucket := buckets.Item()

		entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
	}

	// Err reports any iteration failure after Next returns false.
	return entries, buckets.Err()
}
|
||||||
|
|
||||||
|
// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
	if object.IsPrefix {
		// . The entry must include the relative path as its prefix. Depending on
		// | what is being listed and how the filesystem root was initialized the
		// | relative path may be empty (and so we use path joining here to ensure
		// | we don't end up with an empty path segment).
		// |
		// | . Remove the prefix used during listing.
		// | |
		// | | . Remove the trailing slash.
		// | | |
		// v v v
		return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
	}

	// Not a prefix: plain file entry.
	return newObjectFromUplink(f, relative, object)
}
|
||||||
|
|
||||||
|
func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
|
||||||
|
fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)
|
||||||
|
|
||||||
|
opts := &uplink.ListObjectsOptions{
|
||||||
|
Prefix: newPrefix(bucketPath),
|
||||||
|
|
||||||
|
System: true,
|
||||||
|
Custom: true,
|
||||||
|
}
|
||||||
|
fs.Debugf(f, "opts %+v", opts)
|
||||||
|
|
||||||
|
objects := f.project.ListObjects(ctx, bucketName, opts)
|
||||||
|
|
||||||
|
for objects.Next() {
|
||||||
|
entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
|
||||||
|
}
|
||||||
|
|
||||||
|
err = objects.Err()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "ls -R ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	// Translate libuplink's bucket-not-found into rclone's dir-not-found
	// for whichever return path produced it.
	defer func() {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			err = fs.ErrorDirNotFound
		}
	}()

	if bucketName == "" {
		// A path without a bucket is only valid at the root, where we
		// recurse into every bucket.
		if bucketPath != "" {
			return fs.ErrorListBucketRequired
		}

		return f.listBucketsR(ctx, callback)
	}

	return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}
|
||||||
|
|
||||||
|
// listBucketsR recursively lists every object in every bucket of the
// project, feeding entries to callback.
func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "BKT ls -R")

	buckets := f.project.ListBuckets(ctx, nil)

	for buckets.Next() {
		bucket := buckets.Item()

		// relative and bucketName are both the bucket's name here so
		// that entry paths are rooted at the bucket.
		err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
		if err != nil {
			return err
		}
	}

	return buckets.Err()
}
|
||||||
|
|
||||||
|
func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
|
||||||
|
fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)
|
||||||
|
|
||||||
|
opts := &uplink.ListObjectsOptions{
|
||||||
|
Prefix: newPrefix(bucketPath),
|
||||||
|
Recursive: true,
|
||||||
|
|
||||||
|
System: true,
|
||||||
|
Custom: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
objects := f.project.ListObjects(ctx, bucketName, opts)
|
||||||
|
|
||||||
|
for objects.Next() {
|
||||||
|
object := objects.Item()
|
||||||
|
|
||||||
|
err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = objects.Err()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
	fs.Debugf(f, "stat ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	object, err := f.project.StatObject(ctx, bucketName, bucketPath)
	if err != nil {
		fs.Debugf(f, "err: %+v", err)

		// Map libuplink's not-found error onto rclone's sentinel.
		if errors.Is(err, uplink.ErrObjectNotFound) {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	return newObjectFromUplink(f, relative, object), nil
}
|
||||||
|
|
||||||
|
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
	fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())

	// Reject options we don't support.
	for _, option := range options {
		if option.Mandatory() {
			fs.Errorf(f, "Unsupported mandatory option: %v", option)

			return nil, errors.New("unsupported mandatory option")
		}
	}

	bucketName, bucketPath := f.absolute(src.Remote())

	upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
	if err != nil {
		return nil, err
	}
	// Abort the upload if anything below fails; err is the named return,
	// so the closure observes the function's final error value.
	defer func() {
		if err != nil {
			aerr := upload.Abort()
			if aerr != nil {
				fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
			}
		}
	}()

	// Record the modification time as custom metadata so it can be
	// recovered later (see newObjectFromUplink's "rclone:mtime" handling).
	err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
		"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
	})
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(upload, in)
	if err != nil {
		// Copy failures are treated as retryable by rclone.
		err = fserrors.RetryError(err)
		fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

		return nil, err
	}

	err = upload.Commit()
	if err != nil {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			// Rclone assumes the backend will create the bucket if not existing yet.
			// Here we create the bucket and return a retry error for rclone to retry the upload.
			_, err = f.project.EnsureBucket(ctx, bucketName)
			if err != nil {
				return nil, err
			}
			err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
		}
		return nil, err
	}

	return newObjectFromUplink(f, "", upload.Info()), nil
}
|
||||||
|
|
||||||
|
// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
	// Put streams via io.Copy and does not require a known size, so it
	// handles the unknown-size case directly.
	return f.Put(ctx, in, src, options...)
}
|
||||||
|
|
||||||
|
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
	fs.Debugf(f, "mkdir -p ./%s", relative)

	// Only the bucket needs creating; prefixes are implicit in key names.
	bucketName, _ := f.absolute(relative)

	_, err = f.project.EnsureBucket(ctx, bucketName)

	return err
}
|
||||||
|
|
||||||
|
// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
	fs.Debugf(f, "rmdir ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	if bucketPath != "" {
		// If we can successfully stat it, then it is an object (and not a prefix).
		_, err := f.project.StatObject(ctx, bucketName, bucketPath)
		if err != nil {
			if errors.Is(err, uplink.ErrObjectNotFound) {
				// At this point we know it is not an object,
				// but we don't know if it is a prefix for one.
				//
				// We check this by doing a listing and if we
				// get any results back, then we know this is a
				// valid prefix (which implies the directory is
				// not empty).
				opts := &uplink.ListObjectsOptions{
					Prefix: newPrefix(bucketPath),

					System: true,
					Custom: true,
				}

				objects := f.project.ListObjects(ctx, bucketName, opts)

				if objects.Next() {
					return fs.ErrorDirectoryNotEmpty
				}

				// Empty prefix (or listing error) — nothing to delete.
				return objects.Err()
			}

			return err
		}

		// Stat succeeded, so the path names a file, not a directory.
		return fs.ErrorIsFile
	}

	// No path component: delete the bucket itself.
	_, err = f.project.DeleteBucket(ctx, bucketName)
	if err != nil {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			return fs.ErrorDirNotFound
		}

		if errors.Is(err, uplink.ErrBucketNotEmpty) {
			return fs.ErrorDirectoryNotEmpty
		}

		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// newPrefix returns a new prefix for listing conforming to the libuplink
// requirements. In particular, libuplink requires a trailing slash for
// listings, but rclone does not always provide one. Further, depending on how
// the path was initially path normalization may have removed it (e.g. a
// trailing slash from the CLI is removed before it ever get's to the backend
// code).
func newPrefix(prefix string) string {
	// Empty prefixes stay empty; already-terminated ones pass through.
	if prefix == "" || strings.HasSuffix(prefix, "/") {
		return prefix
	}

	return prefix + "/"
}
|
204
backend/tardigrade/object.go
Normal file
204
backend/tardigrade/object.go
Normal file
|
@ -0,0 +1,204 @@
|
||||||
|
// +build go1.13,!plan9
|
||||||
|
|
||||||
|
package tardigrade
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
|
"golang.org/x/text/unicode/norm"
|
||||||
|
|
||||||
|
"storj.io/uplink"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Object describes a Tardigrade object
type Object struct {
	fs *Fs // parent filesystem

	// absolute is the full "bucket/key" path of the object, NFC-normalized.
	absolute string

	size     int64     // content length in bytes
	created  time.Time // server-side creation time
	modified time.Time // mtime from "rclone:mtime" metadata, else created
}

// Check the interfaces are satisfied.
var _ fs.Object = &Object{}
|
||||||
|
|
||||||
|
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
	// Attempt to use the modified time from the metadata. Otherwise
	// fallback to the server time.
	modified := object.System.Created

	if modifiedStr, ok := object.Custom["rclone:mtime"]; ok {
		var err error

		modified, err = time.Parse(time.RFC3339Nano, modifiedStr)
		if err != nil {
			// Unparseable metadata value — revert to the server time.
			modified = object.System.Created
		}
	}

	// Only the bucket component is needed; the object's key supplies the
	// rest of the absolute path.
	bucketName, _ := bucket.Split(path.Join(f.root, relative))

	return &Object{
		fs: f,

		absolute: norm.NFC.String(bucketName + "/" + object.Key),

		size:     object.System.ContentLength,
		created:  object.System.Created,
		modified: modified,
	}
}
|
||||||
|
|
||||||
|
// String returns a description of the Object
func (o *Object) String() string {
	// Guard against a nil receiver so logging a missing object is safe.
	if o == nil {
		return "<nil>"
	}

	return o.Remote()
}
|
||||||
|
|
||||||
|
// Remote returns the remote path
func (o *Object) Remote() string {
	// It is possible that we have an empty root (meaning the filesystem is
	// rooted at the project level). In this case the relative path is just
	// the full absolute path to the object (including the bucket name).
	if o.fs.root == "" {
		return o.absolute
	}

	// At this point we know that the filesystem itself is at least a
	// bucket name (and possibly a prefix path).
	//
	// . This is necessary to remove the slash.
	// |
	// v
	return o.absolute[len(o.fs.root)+1:]
}
|
||||||
|
|
||||||
|
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	// Populated from "rclone:mtime" metadata when present, otherwise the
	// server creation time (see newObjectFromUplink).
	return o.modified
}
|
||||||
|
|
||||||
|
// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}
|
||||||
|
|
||||||
|
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
||||||
|
|
||||||
|
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (_ string, err error) {
	fs.Debugf(o, "%s", ty)

	// This backend exposes no hashes (see Fs.Hashes), so every type is
	// unsupported.
	return "", hash.ErrUnsupported
}
|
||||||
|
|
||||||
|
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}
|
||||||
|
|
||||||
|
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
	fs.Debugf(o, "touch -d %q sj://%s", t, o.absolute)

	// Modification time cannot be changed in place here; rclone will
	// fall back to its own handling when it sees this sentinel.
	return fs.ErrorCantSetModTime
}
|
||||||
|
|
||||||
|
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadCloser, err error) {
	fs.Debugf(o, "cat sj://%s # %+v", o.absolute, options)

	bucketName, bucketPath := bucket.Split(o.absolute)

	// Convert the semantics of HTTP range headers to an offset and length
	// that libuplink can use. length == -1 means "to the end".
	var (
		offset int64 = 0
		length int64 = -1
	)

	for _, option := range options {
		switch opt := option.(type) {
		case *fs.RangeOption:
			s := opt.Start >= 0
			e := opt.End >= 0

			switch {
			case s && e:
				// Bounded range: End is inclusive, hence the +1.
				offset = opt.Start
				length = (opt.End + 1) - opt.Start
			case s && !e:
				// Open-ended range: from Start to the end.
				offset = opt.Start
			case !s && e:
				// Suffix range ("last End bytes"): we need the
				// object size to compute the starting offset.
				object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
				if err != nil {
					return nil, err
				}

				offset = object.System.ContentLength - opt.End
				length = opt.End
			}
		case *fs.SeekOption:
			offset = opt.Offset
		default:
			if option.Mandatory() {
				fs.Errorf(o, "Unsupported mandatory option: %v", option)

				return nil, errors.New("unsupported mandatory option")
			}
		}
	}

	fs.Debugf(o, "range %d + %d", offset, length)

	return o.fs.project.DownloadObject(ctx, bucketName, bucketPath, &uplink.DownloadOptions{
		Offset: offset,
		Length: length,
	})
}
|
||||||
|
|
||||||
|
// Update in to the object with the modTime given of the given size
|
||||||
|
//
|
||||||
|
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||||
|
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||||
|
// return an error or update the object properly (rather than e.g. calling panic).
|
||||||
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
|
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)
|
||||||
|
|
||||||
|
oNew, err := o.fs.Put(ctx, in, src, options...)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
*o = *(oNew.(*Object))
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove this object.
|
||||||
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
|
fs.Debugf(o, "rm sj://%s", o.absolute)
|
||||||
|
|
||||||
|
bucketName, bucketPath := bucket.Split(o.absolute)
|
||||||
|
|
||||||
|
_, err = o.fs.project.DeleteObject(ctx, bucketName, bucketPath)
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
19
backend/tardigrade/tardigrade_test.go
Normal file
19
backend/tardigrade/tardigrade_test.go
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
// +build go1.13,!plan9
|
||||||
|
|
||||||
|
// Test Tardigrade filesystem interface
|
||||||
|
package tardigrade_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/tardigrade"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote.
//
// It drives rclone's standard backend test suite against a remote named
// "TestTardigrade:" and checks nil-object handling via NilObject.
// NOTE(review): fstests presumably skips when that remote is not
// configured — confirm against the fstests harness.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestTardigrade:",
		NilObject:  (*tardigrade.Object)(nil),
	})
}
|
3
backend/tardigrade/tardigrade_unsupported.go
Normal file
3
backend/tardigrade/tardigrade_unsupported.go
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
// +build !go1.13 plan9
|
||||||
|
|
||||||
|
package tardigrade
|
|
@ -57,6 +57,7 @@ docs = [
|
||||||
"seafile.md",
|
"seafile.md",
|
||||||
"sftp.md",
|
"sftp.md",
|
||||||
"sugarsync.md",
|
"sugarsync.md",
|
||||||
|
"tardigrade.md",
|
||||||
"union.md",
|
"union.md",
|
||||||
"webdav.md",
|
"webdav.md",
|
||||||
"yandex.md",
|
"yandex.md",
|
||||||
|
|
|
@ -55,6 +55,7 @@ Rclone is a command line program to sync files and directories to and from:
|
||||||
* {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
|
* {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
|
||||||
* {{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
* {{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||||
* {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
* {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
||||||
|
* {{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
|
||||||
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
||||||
* {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
|
* {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
|
||||||
* {{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
|
* {{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
|
||||||
|
|
|
@ -53,6 +53,7 @@ See the following for detailed instructions for
|
||||||
* [Seafile](/seafile/)
|
* [Seafile](/seafile/)
|
||||||
* [SFTP](/sftp/)
|
* [SFTP](/sftp/)
|
||||||
* [SugarSync](/sugarsync/)
|
* [SugarSync](/sugarsync/)
|
||||||
|
* [Tardigrade](/tardigrade/)
|
||||||
* [Union](/union/)
|
* [Union](/union/)
|
||||||
* [WebDAV](/webdav/)
|
* [WebDAV](/webdav/)
|
||||||
* [Yandex Disk](/yandex/)
|
* [Yandex Disk](/yandex/)
|
||||||
|
|
|
@ -46,6 +46,7 @@ Here is an overview of the major features of each cloud storage system.
|
||||||
| Seafile | - | No | No | No | - |
|
| Seafile | - | No | No | No | - |
|
||||||
| SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - |
|
| SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - |
|
||||||
| SugarSync | - | No | No | No | - |
|
| SugarSync | - | No | No | No | - |
|
||||||
|
| Tardigrade | - | Yes | No | No | - |
|
||||||
| WebDAV | MD5, SHA1 ††| Yes ††† | Depends | No | - |
|
| WebDAV | MD5, SHA1 ††| Yes ††† | Depends | No | - |
|
||||||
| Yandex Disk | MD5 | Yes | No | No | R/W |
|
| Yandex Disk | MD5 | Yes | No | No | R/W |
|
||||||
| The local filesystem | All | Yes | Depends | No | - |
|
| The local filesystem | All | Yes | Depends | No | - |
|
||||||
|
@ -346,6 +347,7 @@ operations more efficient.
|
||||||
| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
|
| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
|
||||||
| SFTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
|
| SFTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
|
||||||
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes |
|
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes |
|
||||||
|
| Tardigrade | Yes † | No | No | No | No | Yes | Yes | No | No | No |
|
||||||
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
|
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
|
||||||
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | Yes |
|
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | Yes |
|
||||||
| The local filesystem | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
|
| The local filesystem | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
|
||||||
|
@ -355,9 +357,9 @@ operations more efficient.
|
||||||
This deletes a directory quicker than just deleting all the files in
|
This deletes a directory quicker than just deleting all the files in
|
||||||
the directory.
|
the directory.
|
||||||
|
|
||||||
† Note Swift and Hubic implement this in order to delete directory
|
† Note Swift, Hubic, and Tardigrade implement this in order to delete
|
||||||
markers but they don't actually have a quicker way of deleting files
|
directory markers but they don't actually have a quicker way of deleting
|
||||||
other than deleting them individually.
|
files other than deleting them individually.
|
||||||
|
|
||||||
‡ StreamUpload is not supported with Nextcloud
|
‡ StreamUpload is not supported with Nextcloud
|
||||||
|
|
||||||
|
|
303
docs/content/tardigrade.md
Normal file
303
docs/content/tardigrade.md
Normal file
|
@ -0,0 +1,303 @@
|
||||||
|
---
|
||||||
|
title: "Tardigrade"
|
||||||
|
description: "Rclone docs for Tardigrade"
|
||||||
|
date: "2020-04-24"
|
||||||
|
---
|
||||||
|
|
||||||
|
<i class="fas fa-dove"></i> Tardigrade
|
||||||
|
-----------------------------------------
|
||||||
|
|
||||||
|
[Tardigrade](https://tardigrade.io) is an encrypted, secure, and
|
||||||
|
cost-effective object storage service that enables you to store, back up, and
|
||||||
|
archive large amounts of data in a decentralized manner.
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
To make a new Tardigrade configuration you need one of the following:
|
||||||
|
* Access Grant that someone else shared with you.
|
||||||
|
* [API Key](https://documentation.tardigrade.io/getting-started/uploading-your-first-object/create-an-api-key)
|
||||||
|
of a Tardigrade project you are a member of.
|
||||||
|
|
||||||
|
Here is an example of how to make a remote called `remote`. First run:
|
||||||
|
|
||||||
|
rclone config
|
||||||
|
|
||||||
|
This will guide you through an interactive setup process:
|
||||||
|
|
||||||
|
### Setup with access grant
|
||||||
|
|
||||||
|
```
|
||||||
|
No remotes found - make a new one
|
||||||
|
n) New remote
|
||||||
|
s) Set configuration password
|
||||||
|
q) Quit config
|
||||||
|
n/s/q> n
|
||||||
|
name> remote
|
||||||
|
Type of storage to configure.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
[snip]
|
||||||
|
XX / Tardigrade Decentralized Cloud Storage
|
||||||
|
\ "tardigrade"
|
||||||
|
[snip]
|
||||||
|
Storage> tardigrade
|
||||||
|
** See help for tardigrade backend at: https://rclone.org/tardigrade/ **
|
||||||
|
|
||||||
|
Choose an authentication method.
|
||||||
|
Enter a string value. Press Enter for the default ("existing").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / Use an existing access grant.
|
||||||
|
\ "existing"
|
||||||
|
2 / Create a new access grant from satellite address, API key, and passphrase.
|
||||||
|
\ "new"
|
||||||
|
provider> existing
|
||||||
|
Access Grant.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
access_grant> your-access-grant-received-by-someone-else
|
||||||
|
Remote config
|
||||||
|
--------------------
|
||||||
|
[remote]
|
||||||
|
type = tardigrade
|
||||||
|
access_grant = your-access-grant-received-by-someone-else
|
||||||
|
--------------------
|
||||||
|
y) Yes this is OK (default)
|
||||||
|
e) Edit this remote
|
||||||
|
d) Delete this remote
|
||||||
|
y/e/d> y
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setup with API key and passphrase
|
||||||
|
|
||||||
|
```
|
||||||
|
No remotes found - make a new one
|
||||||
|
n) New remote
|
||||||
|
s) Set configuration password
|
||||||
|
q) Quit config
|
||||||
|
n/s/q> n
|
||||||
|
name> remote
|
||||||
|
Type of storage to configure.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
[snip]
|
||||||
|
XX / Tardigrade Decentralized Cloud Storage
|
||||||
|
\ "tardigrade"
|
||||||
|
[snip]
|
||||||
|
Storage> tardigrade
|
||||||
|
** See help for tardigrade backend at: https://rclone.org/tardigrade/ **
|
||||||
|
|
||||||
|
Choose an authentication method.
|
||||||
|
Enter a string value. Press Enter for the default ("existing").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / Use an existing access grant.
|
||||||
|
\ "existing"
|
||||||
|
2 / Create a new access grant from satellite address, API key, and passphrase.
|
||||||
|
\ "new"
|
||||||
|
provider> new
|
||||||
|
Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.
|
||||||
|
Enter a string value. Press Enter for the default ("us-central-1.tardigrade.io").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
1 / US Central 1
|
||||||
|
\ "us-central-1.tardigrade.io"
|
||||||
|
2 / Europe West 1
|
||||||
|
\ "europe-west-1.tardigrade.io"
|
||||||
|
3 / Asia East 1
|
||||||
|
\ "asia-east-1.tardigrade.io"
|
||||||
|
satellite_address> 1
|
||||||
|
API Key.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
api_key> your-api-key-for-your-tardigrade-project
|
||||||
|
Encryption Passphrase. To access existing objects enter passphrase used for uploading.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
passphrase> your-human-readable-encryption-passphrase
|
||||||
|
Remote config
|
||||||
|
--------------------
|
||||||
|
[remote]
|
||||||
|
type = tardigrade
|
||||||
|
satellite_address = 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777
|
||||||
|
api_key = your-api-key-for-your-tardigrade-project
|
||||||
|
passphrase = your-human-readable-encryption-passphrase
|
||||||
|
access_grant = the-access-grant-generated-from-the-api-key-and-passphrase
|
||||||
|
--------------------
|
||||||
|
y) Yes this is OK (default)
|
||||||
|
e) Edit this remote
|
||||||
|
d) Delete this remote
|
||||||
|
y/e/d> y
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Paths are specified as `remote:bucket` (or `remote:` for the `lsf`
|
||||||
|
command). You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
|
||||||
|
|
||||||
|
Once configured you can then use `rclone` like this.
|
||||||
|
|
||||||
|
### Create a new bucket
|
||||||
|
|
||||||
|
Use the `mkdir` command to create new bucket, e.g. `bucket`.
|
||||||
|
|
||||||
|
rclone mkdir remote:bucket
|
||||||
|
|
||||||
|
### List all buckets
|
||||||
|
|
||||||
|
Use the `lsf` command to list all buckets.
|
||||||
|
|
||||||
|
rclone lsf remote:
|
||||||
|
|
||||||
|
Note the colon (`:`) character at the end of the command line.
|
||||||
|
|
||||||
|
### Delete a bucket
|
||||||
|
|
||||||
|
Use the `rmdir` command to delete an empty bucket.
|
||||||
|
|
||||||
|
rclone rmdir remote:bucket
|
||||||
|
|
||||||
|
Use the `purge` command to delete a non-empty bucket with all its content.
|
||||||
|
|
||||||
|
rclone purge remote:bucket
|
||||||
|
|
||||||
|
### Upload objects
|
||||||
|
|
||||||
|
Use the `copy` command to upload an object.
|
||||||
|
|
||||||
|
rclone copy --progress /home/local/directory/file.ext remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
The `--progress` flag is for displaying progress information.
|
||||||
|
Remove it if you don't need this information.
|
||||||
|
|
||||||
|
Use a folder in the local path to upload all its objects.
|
||||||
|
|
||||||
|
rclone copy --progress /home/local/directory/ remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
Only modified files will be copied.
|
||||||
|
|
||||||
|
### List objects
|
||||||
|
|
||||||
|
Use the `ls` command to list recursively all objects in a bucket.
|
||||||
|
|
||||||
|
rclone ls remote:bucket
|
||||||
|
|
||||||
|
Add the folder to the remote path to list recursively all objects in this folder.
|
||||||
|
|
||||||
|
rclone ls remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
Use the `lsf` command to list non-recursively all objects in a bucket or a folder.
|
||||||
|
|
||||||
|
rclone lsf remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
### Download objects
|
||||||
|
|
||||||
|
Use the `copy` command to download an object.
|
||||||
|
|
||||||
|
rclone copy --progress remote:bucket/path/to/dir/file.ext /home/local/directory/
|
||||||
|
|
||||||
|
The `--progress` flag is for displaying progress information.
|
||||||
|
Remove it if you don't need this information.
|
||||||
|
|
||||||
|
Use a folder in the remote path to download all its objects.
|
||||||
|
|
||||||
|
rclone copy --progress remote:bucket/path/to/dir/ /home/local/directory/
|
||||||
|
|
||||||
|
### Delete objects
|
||||||
|
|
||||||
|
Use the `deletefile` command to delete a single object.
|
||||||
|
|
||||||
|
rclone deletefile remote:bucket/path/to/dir/file.ext
|
||||||
|
|
||||||
|
Use the `delete` command to delete all objects in a folder.
|
||||||
|
|
||||||
|
rclone delete remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
### Print the total size of objects
|
||||||
|
|
||||||
|
Use the `size` command to print the total size of objects in a bucket or a folder.
|
||||||
|
|
||||||
|
rclone size remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
### Sync two Locations
|
||||||
|
|
||||||
|
Use the `sync` command to sync the source to the destination,
|
||||||
|
changing the destination only, deleting any excess files.
|
||||||
|
|
||||||
|
rclone sync --progress /home/local/directory/ remote:bucket/path/to/dir/
|
||||||
|
|
||||||
|
The `--progress` flag is for displaying progress information.
|
||||||
|
Remove it if you don't need this information.
|
||||||
|
|
||||||
|
Since this can cause data loss, test first with the `--dry-run` flag
|
||||||
|
to see exactly what would be copied and deleted.
|
||||||
|
|
||||||
|
The sync can be done also from Tardigrade to the local file system.
|
||||||
|
|
||||||
|
rclone sync --progress remote:bucket/path/to/dir/ /home/local/directory/
|
||||||
|
|
||||||
|
Or between two Tardigrade buckets.
|
||||||
|
|
||||||
|
rclone sync --progress remote-us:bucket/path/to/dir/ remote-europe:bucket/path/to/dir/
|
||||||
|
|
||||||
|
Or even between another cloud storage and Tardigrade.
|
||||||
|
|
||||||
|
rclone sync --progress s3:bucket/path/to/dir/ tardigrade:bucket/path/to/dir/
|
||||||
|
|
||||||
|
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/tardigrade/tardigrade.go then run make backenddocs -->
|
||||||
|
### Standard Options
|
||||||
|
|
||||||
|
Here are the standard options specific to tardigrade (Tardigrade Decentralized Cloud Storage).
|
||||||
|
|
||||||
|
#### --tardigrade-provider
|
||||||
|
|
||||||
|
Choose an authentication method.
|
||||||
|
|
||||||
|
- Config: provider
|
||||||
|
- Env Var: RCLONE_TARDIGRADE_PROVIDER
|
||||||
|
- Type: string
|
||||||
|
- Default: "existing"
|
||||||
|
- Examples:
|
||||||
|
- "existing"
|
||||||
|
- Use an existing access grant.
|
||||||
|
- "new"
|
||||||
|
- Create a new access grant from satellite address, API key, and passphrase.
|
||||||
|
|
||||||
|
#### --tardigrade-access-grant
|
||||||
|
|
||||||
|
Access Grant.
|
||||||
|
|
||||||
|
- Config: access_grant
|
||||||
|
- Env Var: RCLONE_TARDIGRADE_ACCESS_GRANT
|
||||||
|
- Type: string
|
||||||
|
- Default: ""
|
||||||
|
|
||||||
|
#### --tardigrade-satellite-address
|
||||||
|
|
||||||
|
Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.
|
||||||
|
|
||||||
|
- Config: satellite_address
|
||||||
|
- Env Var: RCLONE_TARDIGRADE_SATELLITE_ADDRESS
|
||||||
|
- Type: string
|
||||||
|
- Default: "us-central-1.tardigrade.io"
|
||||||
|
- Examples:
|
||||||
|
- "us-central-1.tardigrade.io"
|
||||||
|
- US Central 1
|
||||||
|
- "europe-west-1.tardigrade.io"
|
||||||
|
- Europe West 1
|
||||||
|
- "asia-east-1.tardigrade.io"
|
||||||
|
- Asia East 1
|
||||||
|
|
||||||
|
#### --tardigrade-api-key
|
||||||
|
|
||||||
|
API Key.
|
||||||
|
|
||||||
|
- Config: api_key
|
||||||
|
- Env Var: RCLONE_TARDIGRADE_API_KEY
|
||||||
|
- Type: string
|
||||||
|
- Default: ""
|
||||||
|
|
||||||
|
#### --tardigrade-passphrase
|
||||||
|
|
||||||
|
Encryption Passphrase. To access existing objects enter passphrase used for uploading.
|
||||||
|
|
||||||
|
- Config: passphrase
|
||||||
|
- Env Var: RCLONE_TARDIGRADE_PASSPHRASE
|
||||||
|
- Type: string
|
||||||
|
- Default: ""
|
||||||
|
|
||||||
|
<!--- autogenerated options stop -->
|
|
@ -89,6 +89,7 @@
|
||||||
<li><a href="/seafile/"><i class="fa fa-server"></i> Seafile</a></li>
|
<li><a href="/seafile/"><i class="fa fa-server"></i> Seafile</a></li>
|
||||||
<li><a href="/sftp/"><i class="fa fa-server"></i> SFTP</a></li>
|
<li><a href="/sftp/"><i class="fa fa-server"></i> SFTP</a></li>
|
||||||
<li><a href="/sugarsync/"><i class="fas fa-dove"></i> SugarSync</a></li>
|
<li><a href="/sugarsync/"><i class="fas fa-dove"></i> SugarSync</a></li>
|
||||||
|
<li><a href="/tardigrade/"><i class="fas fa-dove"></i> Tardigrade</a></li>
|
||||||
<li><a href="/union/"><i class="fa fa-link"></i> Union (merge backends)</a></li>
|
<li><a href="/union/"><i class="fa fa-link"></i> Union (merge backends)</a></li>
|
||||||
<li><a href="/webdav/"><i class="fa fa-server"></i> WebDAV</a></li>
|
<li><a href="/webdav/"><i class="fa fa-server"></i> WebDAV</a></li>
|
||||||
<li><a href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a></li>
|
<li><a href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a></li>
|
||||||
|
|
|
@ -275,3 +275,6 @@ backends:
|
||||||
- backend: "seafile"
|
- backend: "seafile"
|
||||||
remote: "TestSeafileEncrypted:"
|
remote: "TestSeafileEncrypted:"
|
||||||
fastlist: true
|
fastlist: true
|
||||||
|
- backend: "tardigrade"
|
||||||
|
remote: "TestTardigrade:"
|
||||||
|
fastlist: true
|
||||||
|
|
Loading…
Reference in a new issue