googlecloudstorage: add --gcs-no-check-bucket to minimise transactions and perms
Adds a configuration option to the GCS backend to allow skipping the
check that a bucket exists before copying an object to it, much like
f406dbb added for S3.
parent 240561850b
commit 25146b4306
1 changed file with 20 additions and 2 deletions
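As a usage sketch (the remote name and bucket path below are placeholders, and it assumes the bucket has already been created), the new behaviour can be switched on per invocation with the flag from the commit title, or persistently via the no_check_bucket key registered in the diff below:

    rclone copy /local/data gcs:existing-bucket/path --gcs-no-check-bucket

Skipping the check saves one transaction per upload or server-side copy, and it also means the credentials no longer need rights to get or create buckets, which is the "perms" part of the title.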
@@ -295,6 +295,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 				Value: "DURABLE_REDUCED_AVAILABILITY",
 				Help:  "Durable reduced availability storage class",
 			}},
+		}, {
+			Name: "no_check_bucket",
+			Help: `If set, don't attempt to check the bucket exists or create it.
+
+This can be useful when trying to minimise the number of transactions
+rclone does if you know the bucket exists already.
+`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -317,6 +326,7 @@ type Options struct {
 	BucketPolicyOnly bool                 `config:"bucket_policy_only"`
 	Location         string               `config:"location"`
 	StorageClass     string               `config:"storage_class"`
+	NoCheckBucket    bool                 `config:"no_check_bucket"`
 	Enc              encoder.MultiEncoder `config:"encoding"`
 }
 
@@ -840,6 +850,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 	}, nil)
 }
 
+// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
+func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
+	if f.opt.NoCheckBucket {
+		return nil
+	}
+	return f.makeBucket(ctx, bucket)
+}
+
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty: Error 409: The bucket you tried
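To see the control flow outside the diff, here is a minimal, self-contained Go sketch of the gate that checkBucket introduces. The names mirror the hunk above, but the types are simplified stand-ins and makeBucket is mocked (the real backend issues a GCS API call there); treat it as an illustration rather than the backend code.

package main

import (
	"context"
	"fmt"
)

// Options mirrors the field added to the backend's Options struct.
type Options struct {
	NoCheckBucket bool
}

// Fs is a stand-in for the backend filesystem carrying the parsed options.
type Fs struct {
	opt Options
}

// makeBucket stands in for the real call that checks for the bucket and
// creates it if missing - the transaction the new option lets callers skip.
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	fmt.Printf("API call: check/create bucket %q\n", bucket)
	return nil
}

// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true.
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
	if f.opt.NoCheckBucket {
		return nil // trust that the bucket already exists
	}
	return f.makeBucket(ctx, bucket)
}

func main() {
	ctx := context.Background()

	// Default: each upload/copy first verifies (or creates) the bucket.
	withCheck := &Fs{opt: Options{NoCheckBucket: false}}
	_ = withCheck.checkBucket(ctx, "my-bucket")

	// With no_check_bucket set: no bucket transaction is issued at all.
	noCheck := &Fs{opt: Options{NoCheckBucket: true}}
	_ = noCheck.checkBucket(ctx, "my-bucket")
}

The design point is that makeBucket itself is untouched: only the call sites in Copy and Update (changed in the hunks below) go through checkBucket, so behaviour changes only when the user opts in.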
@@ -873,7 +891,7 @@ func (f *Fs) Precision() time.Duration {
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	dstBucket, dstPath := f.split(remote)
-	err := f.makeBucket(ctx, dstBucket)
+	err := f.checkBucket(ctx, dstBucket)
 	if err != nil {
 		return nil, err
 	}
@@ -1123,7 +1141,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	bucket, bucketPath := o.split()
-	err := o.fs.makeBucket(ctx, bucket)
+	err := o.fs.checkBucket(ctx, bucket)
 	if err != nil {
 		return err
 	}