azureblob: add --azureblob-list-chunk parameter - Fixes #2390
This parameter can be used to adjust the size of the listing chunks, which helps work around problems when listing large buckets.
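A hedged usage sketch (the remote name `azblob` and the container are illustrative, not part of this commit): the option can be set per run through the generated `--azureblob-list-chunk` flag, or persistently via the `list_chunk` key in the remote's section of rclone.conf.

    # request at most 1000 items per listing call for this run
    rclone ls azblob:mycontainer --azureblob-list-chunk 1000

    # or pin it in rclone.conf
    [azblob]
    type = azureblob
    list_chunk = 1000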
parent 1e7a7d756f
commit 282540c2d4
2 changed files with 27 additions and 11 deletions
backend/azureblob/azureblob.go

@@ -37,7 +37,7 @@ const (
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 10 * time.Second
 	decayConstant    = 1    // bigger for slower decay, exponential
-	listChunkSize    = 5000 // number of items to read at once
+	maxListChunkSize = 5000 // number of items to read at once
 	modTimeKey       = "mtime"
 	timeFormatIn     = time.RFC3339
 	timeFormatOut    = "2006-01-02T15:04:05.000000000Z07:00"
@@ -80,6 +80,11 @@ func init() {
 			Help:     "Upload chunk size. Must fit in memory.",
 			Default:  fs.SizeSuffix(defaultChunkSize),
 			Advanced: true,
+		}, {
+			Name:     "list_chunk",
+			Help:     "Size of blob list.",
+			Default:  maxListChunkSize,
+			Advanced: true,
 		}, {
 			Name: "access_tier",
 			Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
@@ -91,13 +96,14 @@ func init() {
 
 // Options defines the configuration for this backend
 type Options struct {
 	Account       string        `config:"account"`
 	Key           string        `config:"key"`
 	Endpoint      string        `config:"endpoint"`
 	SASURL        string        `config:"sas_url"`
 	UploadCutoff  fs.SizeSuffix `config:"upload_cutoff"`
 	ChunkSize     fs.SizeSuffix `config:"chunk_size"`
-	AccessTier    string        `config:"access_tier"`
+	ListChunkSize uint          `config:"list_chunk"`
+	AccessTier    string        `config:"access_tier"`
 }
 
 // Fs represents a remote azure server
@@ -211,6 +217,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.ChunkSize > maxChunkSize {
 		return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
 	}
+	if opt.ListChunkSize > maxListChunkSize {
+		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
+	}
 	container, directory, err := parsePath(root)
 	if err != nil {
 		return nil, err
@@ -474,7 +483,7 @@ func (f *Fs) markContainerOK() {
 
 // listDir lists a single directory
 func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
-	err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
@@ -545,7 +554,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 		return fs.ErrorListBucketRequired
 	}
 	list := walk.NewListRHelper(callback)
-	err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+	err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
@@ -566,7 +575,7 @@ type listContainerFn func(*azblob.ContainerItem) error
 // listContainersToFn lists the containers to the function supplied
 func (f *Fs) listContainersToFn(fn listContainerFn) error {
 	params := azblob.ListContainersSegmentOptions{
-		MaxResults: int32(listChunkSize),
+		MaxResults: int32(f.opt.ListChunkSize),
 	}
 	ctx := context.Background()
 	for marker := (azblob.Marker{}); marker.NotDone(); {
docs/content/azureblob.md

@@ -184,6 +184,13 @@ Upload chunk size. Default 4MB. Note that this is stored in memory
 and there may be up to `--transfers` chunks stored at once in memory.
 This can be at most 100MB.
 
+#### --azureblob-list-chunk=SIZE ####
+
+List blob limit. Default is the maximum, 5000. `List blobs` requests
+are permitted 2 minutes per megabyte to complete. If an operation is
+taking longer than 2 minutes per megabyte on average, it will time out
+([source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval)).
+This limits the number of blob items returned, to avoid the timeout.
 
 #### --azureblob-access-tier=Hot/Cool/Archive ####
 
 Azure storage supports blob tiering, you can configure tier in advanced
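To make the listing change concrete, here is a minimal Go sketch (not code from this commit) of the segment-listing loop that `listContainersToFn` drives, with the configurable chunk size flowing into `MaxResults`. The `github.com/Azure/azure-storage-blob-go/azblob` import path and the credentials in `main` are assumptions for illustration; error handling is trimmed to the essentials.

    package main

    import (
    	"context"
    	"fmt"
    	"net/url"

    	// Import path assumed; rclone wraps the Azure Storage Blob SDK for Go.
    	"github.com/Azure/azure-storage-blob-go/azblob"
    )

    // listContainerNames pages through every container on the service,
    // asking for at most chunkSize items per request -- the same pattern
    // listContainersToFn uses, with the page size now coming from the
    // list_chunk option instead of a hard-coded constant.
    func listContainerNames(ctx context.Context, svc azblob.ServiceURL, chunkSize uint) ([]string, error) {
    	params := azblob.ListContainersSegmentOptions{
    		MaxResults: int32(chunkSize),
    	}
    	var names []string
    	for marker := (azblob.Marker{}); marker.NotDone(); {
    		resp, err := svc.ListContainersSegment(ctx, marker, params)
    		if err != nil {
    			return nil, err
    		}
    		for _, item := range resp.ContainerItems {
    			names = append(names, item.Name)
    		}
    		marker = resp.NextMarker // an empty NextMarker ends the loop
    	}
    	return names, nil
    }

    func main() {
    	// Placeholder account name and (base64) key; recent SDK versions
    	// return an error from this constructor.
    	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
    	if err != nil {
    		panic(err)
    	}
    	endpoint, _ := url.Parse("https://myaccount.blob.core.windows.net")
    	svc := azblob.NewServiceURL(*endpoint, azblob.NewPipeline(cred, azblob.PipelineOptions{}))

    	names, err := listContainerNames(context.Background(), svc, 1000)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(names)
    }

Because `NotDone()` is true for the zero `Marker`, the loop always issues at least one request and stops once the service returns an empty `NextMarker`; the service may return fewer than `MaxResults` items per page.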