fs: Add CanHaveEmptyDirectories and BucketBased feature flags to all remotes
parent c1bfdd893f
commit 20ae7d562b
18 changed files with 88 additions and 25 deletions
@@ -190,7 +190,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
 		noAuthClient: fs.Config.Client(),
 	}
-	f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		CaseInsensitive:         true,
+		ReadMimeType:            true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
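Every backend hunk in this commit follows the same pattern as the one above: the backend lists its static flags in an fs.Features literal and calls Fill(f) on it, which also fills in the function-valued features by checking which optional interfaces the Fs implements. The snippet below is a self-contained toy model of that pattern, not rclone's code: the Purger interface, Features struct and toyFs type here are illustrative only.

package main

import "fmt"

// Toy model (not rclone's code) of the Features/Fill pattern used in the
// hunks of this commit: static flags are set in a struct literal, then
// Fill probes the concrete type for optional interfaces it implements.
type Purger interface{ Purge() error }

type Features struct {
	CanHaveEmptyDirectories bool         // static flag, set by the backend
	BucketBased             bool         // static flag, set by the backend
	Purge                   func() error // filled in if the Fs implements Purger
}

func (ft *Features) Fill(f interface{}) *Features {
	if p, ok := f.(Purger); ok {
		ft.Purge = p.Purge
	}
	return ft
}

type toyFs struct{ features *Features }

func (f *toyFs) Purge() error { return nil }

func main() {
	f := &toyFs{}
	f.features = (&Features{
		CanHaveEmptyDirectories: true, // this toy backend supports empty dirs
	}).Fill(f)
	fmt.Println(f.features.Purge != nil) // true: Purger was detected by Fill
}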
@@ -214,7 +214,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+	}).Fill(f)
 	if f.root != "" {
 		f.root += "/"
 		// Check to see if the (container,directory) is actually an existing file
b2/b2.go
@@ -257,7 +257,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		bufferTokens: make(chan []byte, fs.Config.Transfers),
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+	}).Fill(f)
 	// Set the test flag if required
 	if *b2TestMode != "" {
 		testMode := strings.TrimSpace(*b2TestMode)
@@ -244,7 +244,10 @@ func NewFs(name, root string) (fs.Fs, error) {
 		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 	}
-	f.features = (&fs.Features{CaseInsensitive: true}).Fill(f)
+	f.features = (&fs.Features{
+		CaseInsensitive:         true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	// Renew the token in the background
@@ -104,10 +104,12 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
-		CaseInsensitive: mode == NameEncryptionOff,
-		DuplicateFiles:  true,
-		ReadMimeType:    false, // MimeTypes not supported with crypt
-		WriteMimeType:   false,
+		CaseInsensitive:         mode == NameEncryptionOff,
+		DuplicateFiles:          true,
+		ReadMimeType:            false, // MimeTypes not supported with crypt
+		WriteMimeType:           false,
+		BucketBased:             true,
+		CanHaveEmptyDirectories: true,
 	}).Fill(f).Mask(wrappedFs)
 	return f, err
 }
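As the comment in the crypt hunk says, crypt's flags are ANDed with the wrapped remote's via Mask (the Mask change itself is in the fs/fs.go hunk further down), so crypt only reports CanHaveEmptyDirectories or BucketBased when the backend underneath also sets them. A self-contained sketch of that AND semantics, using toy types rather than rclone's:

package main

import "fmt"

// Toy illustration of Mask's AND semantics for boolean feature flags: a
// wrapping remote (like crypt) starts from the flags it could support and
// keeps only the ones the wrapped remote also supports.
type Flags struct {
	CanHaveEmptyDirectories bool
	BucketBased             bool
}

func (f Flags) Mask(wrapped Flags) Flags {
	return Flags{
		CanHaveEmptyDirectories: f.CanHaveEmptyDirectories && wrapped.CanHaveEmptyDirectories,
		BucketBased:             f.BucketBased && wrapped.BucketBased,
	}
}

func main() {
	crypt := Flags{CanHaveEmptyDirectories: true, BucketBased: true}
	overS3 := Flags{BucketBased: true}                // s3: bucket based, no empty dirs
	overLocal := Flags{CanHaveEmptyDirectories: true} // local: empty dirs, not bucket based
	fmt.Println(crypt.Mask(overS3))    // {false true}
	fmt.Println(crypt.Mask(overLocal)) // {true false}
}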
@@ -408,7 +408,12 @@ func NewFs(name, path string) (fs.Fs, error) {
 	}
 	f.teamDriveID = fs.ConfigFileGet(name, "team_drive")
 	f.isTeamDrive = f.teamDriveID != ""
-	f.features = (&fs.Features{DuplicateFiles: true, ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		DuplicateFiles:          true,
+		ReadMimeType:            true,
+		WriteMimeType:           true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 
 	// Create a new authorized Drive client.
 	f.client = oAuthClient
@@ -185,7 +185,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		srv:   srv,
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
-	f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		CaseInsensitive:         true,
+		ReadMimeType:            true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	f.setRoot(root)
 
 	// See if the root is actually an object
fs/fs.go
@@ -245,11 +245,13 @@ type ListRFn func(dir string, callback ListRCallback) error
 
 // Features describe the optional features of the Fs
 type Features struct {
-	// Feature flags
-	CaseInsensitive bool
-	DuplicateFiles  bool
-	ReadMimeType    bool
-	WriteMimeType   bool
+	// Feature flags, whether Fs
+	CaseInsensitive         bool // has case insensitive files
+	DuplicateFiles          bool // allows duplicate files
+	ReadMimeType            bool // can read the mime type of objects
+	WriteMimeType           bool // can set the mime type of objects
+	CanHaveEmptyDirectories bool // can have empty directories
+	BucketBased             bool // is bucket based (like s3, swift etc)
 
 	// Purge all files in the root and the root directory
 	//
@@ -444,6 +446,8 @@ func (ft *Features) Mask(f Fs) *Features {
 	ft.DuplicateFiles = ft.DuplicateFiles && mask.DuplicateFiles
 	ft.ReadMimeType = ft.ReadMimeType && mask.ReadMimeType
 	ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType
+	ft.CanHaveEmptyDirectories = ft.CanHaveEmptyDirectories && mask.CanHaveEmptyDirectories
+	ft.BucketBased = ft.BucketBased && mask.BucketBased
 	if mask.Purge == nil {
 		ft.Purge = nil
 	}
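The two fs/fs.go hunks above define the new flags and propagate them through Mask; nothing in this commit reads them yet. Callers get at them through the Features() accessor on an fs.Fs. A hedged sketch of that kind of lookup follows: the helpers are hypothetical, not part of this commit, and the github.com/ncw/rclone import path is assumed from this era of the repository.

package example

import "github.com/ncw/rclone/fs" // assumed import path for this era of the repo

// copyEmptyDirs is a hypothetical helper, not part of this commit: it shows
// a caller consulting the new flag before trying to replicate empty dirs.
func copyEmptyDirs(dst fs.Fs) bool {
	return dst.Features().CanHaveEmptyDirectories
}

// isBucketBased shows the other new flag being read the same way.
func isBucketBased(f fs.Fs) bool {
	return f.Features().BucketBased
}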
@@ -207,7 +207,9 @@ func NewFs(name, root string) (ff fs.Fs, err error) {
 		pass:     pass,
 		dialAddr: dialAddr,
 	}
-	f.features = (&fs.Features{}).Fill(f)
+	f.features = (&fs.Features{
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	// Make a connection and pool it to return errors early
 	c, err := f.getFtpConnection()
 	if err != nil {
@@ -317,7 +317,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		location:     fs.ConfigFileGet(name, "location"),
 		storageClass: fs.ConfigFileGet(name, "storage_class"),
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+	}).Fill(f)
 	if f.objectACL == "" {
 		f.objectACL = "private"
 	}
@@ -151,7 +151,9 @@ func NewFs(name, root string) (fs.Fs, error) {
 		endpoint:    u,
 		endpointURL: u.String(),
 	}
-	f.features = (&fs.Features{}).Fill(f)
+	f.features = (&fs.Features{
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	if isFile {
 		return f, fs.ErrorIsFile
 	}
@@ -92,7 +92,10 @@ func NewFs(name, root string) (fs.Fs, error) {
 		dirNames: newMapper(),
 	}
 	f.root = f.cleanPath(root)
-	f.features = (&fs.Features{CaseInsensitive: f.caseInsensitive()}).Fill(f)
+	f.features = (&fs.Features{
+		CaseInsensitive:         f.caseInsensitive(),
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	if *followSymlinks {
 		f.lstat = os.Stat
 	}
@@ -205,7 +205,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
-	f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		CaseInsensitive:         true,
+		ReadMimeType:            true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	f.srv.SetErrorHandler(errorHandler)
 
 	// Renew the token in the background
@@ -240,7 +240,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		bucket: bucket,
 		svc:    svc,
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+	}).Fill(f)
 
 	if f.root != "" {
 		if !strings.HasSuffix(f.root, "/") {
s3/s3.go
@@ -401,7 +401,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		sse:          fs.ConfigFileGet(name, "server_side_encryption"),
 		storageClass: fs.ConfigFileGet(name, "storage_class"),
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+	}).Fill(f)
 	if *s3ACL != "" {
 		f.acl = *s3ACL
 	}
@@ -290,7 +290,9 @@ func NewFs(name, root string) (fs.Fs, error) {
 		mkdirLock: newStringLock(),
 		connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
 	}
-	f.features = (&fs.Features{}).Fill(f)
+	f.features = (&fs.Features{
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	// Make a connection and pool it to return errors early
 	c, err := f.getSftpConnection()
 	if err != nil {
@@ -200,7 +200,11 @@ func NewFsWithConnection(name, root string, c *swift.Connection) (fs.Fs, error)
 		segmentsContainer: container + "_segments",
 		root:              directory,
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
+	}).Fill(f)
 	// StorageURL overloading
 	storageURL := fs.ConfigFileGet(name, "storage_url")
 	if storageURL != "" {
@@ -133,7 +133,11 @@ func NewFs(name, root string) (fs.Fs, error) {
 		name: name,
 		yd:   yandexDisk,
 	}
-	f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
+	f.features = (&fs.Features{
+		ReadMimeType:            true,
+		WriteMimeType:           true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(f)
 	f.setRoot(root)
 
 	// Check to see if the object exists and is a file