From bf954b74ffb1d96203e49af68681655ad380f569 Mon Sep 17 00:00:00 2001
From: Kaloyan Raev
Date: Wed, 16 Oct 2024 17:33:01 +0300
Subject: [PATCH] s3: Storj provider: fix server-side copy of files bigger
 than 5GB

Like some other S3-compatible providers, Storj does not currently
implement UploadPartCopy and returns NotImplemented errors for
multi-part server-side copies.

This patch works around the problem by raising --s3-copy-cutoff for
Storj to the maximum. This means that rclone will never use multi-part
copies for files in Storj. This includes files larger than 5GB which
(according to AWS documentation) must be copied with multi-part copy.
This works fine for Storj.

See https://github.com/storj/roadmap/issues/40
---
 backend/s3/s3.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 636bba0e8..09e32d1c1 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -3368,6 +3368,10 @@ func setQuirks(opt *Options) {
 			opt.ChunkSize = 64 * fs.Mebi
 		}
 		useAlreadyExists = false // returns BucketAlreadyExists
+		// Storj doesn't support multi-part server side copy:
+		// https://github.com/storj/roadmap/issues/40
+		// So make cutoff very large which it does support
+		opt.CopyCutoff = math.MaxInt64
 	case "Synology":
 		useMultipartEtag = false
 		useAlreadyExists = false // untested
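
Usage note (a sketch, not part of the patch): on rclone releases without this
quirk, the same effect can be obtained by raising the cutoff manually with the
--s3-copy-cutoff flag mentioned above; the remote name "storj", the bucket, and
the 9999T value are illustrative:

    rclone copy --s3-copy-cutoff 9999T storj:bucket/big-file storj:bucket/big-file-copy

With the cutoff set above the object size, rclone performs the server-side copy
as a single CopyObject request rather than via UploadPartCopy, which Storj
accepts even for objects larger than 5GB.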