From 75257fc9cdcaba71aba29e733c75c253fcf1540d Mon Sep 17 00:00:00 2001
From: Kaloyan Raev
Date: Wed, 16 Oct 2024 17:33:01 +0300
Subject: [PATCH] s3: Storj provider: fix server-side copy of files bigger
 than 5GB

Like some other S3-compatible providers, Storj does not currently
implement UploadPartCopy and returns NotImplemented errors for
multi-part server-side copies.

This patch works around the problem by raising --s3-copy-cutoff for
Storj to the maximum. This means that rclone will never use multi-part
copies for files in Storj. This includes files larger than 5GB which
(according to AWS documentation) must be copied with multi-part copy.
This works fine for Storj.

See https://github.com/storj/roadmap/issues/40
---
 backend/s3/s3.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index eda36a259..3c62dd3c5 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -3470,6 +3470,10 @@ func setQuirks(opt *Options) {
 			opt.ChunkSize = 64 * fs.Mebi
 		}
 		useAlreadyExists = false // returns BucketAlreadyExists
+		// Storj doesn't support multi-part server side copy:
+		// https://github.com/storj/roadmap/issues/40
+		// So make cutoff very large which it does support
+		opt.CopyCutoff = math.MaxInt64
 	case "Synology":
 		useMultipartEtag = false
 		useAlreadyExists = false // untested
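
Note: on rclone versions without this quirk, the same effect can be had by
overriding the copy cutoff manually, since --s3-copy-cutoff is the existing
flag that decides when server-side copies switch to the multi-part path. A
minimal sketch, assuming a remote named "storj:" configured with the S3
backend and Storj provider, and a placeholder object path:

    rclone copyto --s3-copy-cutoff 1P storj:bucket/big-file storj:bucket/big-file-copy

Setting the cutoff above the object size forces a single CopyObject call
instead of the UploadPartCopy sequence that Storj rejects with
NotImplemented.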