From 7ba91015f5f2dcc9f536ff762519fd57ad95e600 Mon Sep 17 00:00:00 2001
From: Milos Gajdos
Date: Mon, 18 Dec 2023 09:52:19 +0000
Subject: [PATCH 1/2] fix: remove disabling of multipart combine small parts

This reverts https://github.com/distribution/distribution/pull/3556

This feature is currently broken and requires more fundamental changes in
the S3 driver. Until then it's better to remove it.

Signed-off-by: Milos Gajdos
---
 docs/content/storage-drivers/s3.md         |  4 +++
 registry/storage/driver/s3-aws/s3.go       | 23 +------------
 registry/storage/driver/s3-aws/s3_test.go  | 42 +++++++++--------
 3 files changed, 21 insertions(+), 48 deletions(-)

diff --git a/docs/content/storage-drivers/s3.md b/docs/content/storage-drivers/s3.md
index 491ed5ff..669ff058 100644
--- a/docs/content/storage-drivers/s3.md
+++ b/docs/content/storage-drivers/s3.md
@@ -59,6 +59,10 @@ Amazon S3 or S3 compatible services for object storage.
 
 `loglevel`: (optional) Valid values are: `off` (default), `debug`, `debugwithsigning`, `debugwithhttpbody`, `debugwithrequestretries`, `debugwithrequesterrors` and `debugwitheventstreambody`. See the [AWS SDK for Go API reference](https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType) for details.
 
+**NOTE:** Currently the S3 storage driver does not support S3 API compatible storage that
+does not allow combining the last part in the multipart upload into a part that is bigger
+than the preconfigured `chunkSize`.
+
 ## S3 permission scopes
 
 The following AWS policy is required by the registry for push and pull. Make sure to replace `S3_BUCKET_NAME` with the name of your bucket.
diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go
index 7dde1f72..d6e5f032 100644
--- a/registry/storage/driver/s3-aws/s3.go
+++ b/registry/storage/driver/s3-aws/s3.go
@@ -111,7 +111,6 @@ type DriverParameters struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	UserAgent                   string
@@ -165,7 +164,6 @@ type driver struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	ObjectACL                   string
@@ -405,23 +403,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Dr
 		return nil, fmt.Errorf("the useDualStack parameter should be a boolean")
 	}
 
-	mutlipartCombineSmallPart := true
-	combine := parameters["multipartcombinesmallpart"]
-	switch combine := combine.(type) {
-	case string:
-		b, err := strconv.ParseBool(combine)
-		if err != nil {
-			return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-		}
-		mutlipartCombineSmallPart = b
-	case bool:
-		mutlipartCombineSmallPart = combine
-	case nil:
-		// do nothing
-	default:
-		return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-	}
-
 	sessionToken := ""
 
 	accelerateBool := false
@@ -457,7 +438,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Dr
 		multipartCopyChunkSize,
 		multipartCopyMaxConcurrency,
 		multipartCopyThresholdSize,
-		mutlipartCombineSmallPart,
 		fmt.Sprint(rootDirectory),
 		storageClass,
 		fmt.Sprint(userAgent),
@@ -608,7 +588,6 @@ func New(ctx context.Context, params DriverParameters) (*Driver, error) {
 		MultipartCopyChunkSize:      params.MultipartCopyChunkSize,
 		MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
 		MultipartCopyThresholdSize:  params.MultipartCopyThresholdSize,
-		MultipartCombineSmallPart:   params.MultipartCombineSmallPart,
 		RootDirectory:               params.RootDirectory,
 		StorageClass:                params.StorageClass,
 		ObjectACL:                   params.ObjectACL,
@@ -1636,7 +1615,7 @@ func (w *writer) flush() error {
 	}
 
 	buf := bytes.NewBuffer(w.ready.data)
-	if w.driver.MultipartCombineSmallPart && (w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize)) {
+	if w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize) {
 		if _, err := buf.Write(w.pending.data); err != nil {
 			return err
 		}
diff --git a/registry/storage/driver/s3-aws/s3_test.go b/registry/storage/driver/s3-aws/s3_test.go
index ea11f872..8fd23c50 100644
--- a/registry/storage/driver/s3-aws/s3_test.go
+++ b/registry/storage/driver/s3-aws/s3_test.go
@@ -28,23 +28,22 @@ var (
 
 func init() {
 	var (
-		accessKey        = os.Getenv("AWS_ACCESS_KEY")
-		secretKey        = os.Getenv("AWS_SECRET_KEY")
-		bucket           = os.Getenv("S3_BUCKET")
-		encrypt          = os.Getenv("S3_ENCRYPT")
-		keyID            = os.Getenv("S3_KEY_ID")
-		secure           = os.Getenv("S3_SECURE")
-		skipVerify       = os.Getenv("S3_SKIP_VERIFY")
-		v4Auth           = os.Getenv("S3_V4_AUTH")
-		region           = os.Getenv("AWS_REGION")
-		objectACL        = os.Getenv("S3_OBJECT_ACL")
-		regionEndpoint   = os.Getenv("REGION_ENDPOINT")
-		forcePathStyle   = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
-		sessionToken     = os.Getenv("AWS_SESSION_TOKEN")
-		useDualStack     = os.Getenv("S3_USE_DUALSTACK")
-		combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
-		accelerate       = os.Getenv("S3_ACCELERATE")
-		logLevel         = os.Getenv("S3_LOGLEVEL")
+		accessKey      = os.Getenv("AWS_ACCESS_KEY")
+		secretKey      = os.Getenv("AWS_SECRET_KEY")
+		bucket         = os.Getenv("S3_BUCKET")
+		encrypt        = os.Getenv("S3_ENCRYPT")
+		keyID          = os.Getenv("S3_KEY_ID")
+		secure         = os.Getenv("S3_SECURE")
+		skipVerify     = os.Getenv("S3_SKIP_VERIFY")
+		v4Auth         = os.Getenv("S3_V4_AUTH")
+		region         = os.Getenv("AWS_REGION")
+		objectACL      = os.Getenv("S3_OBJECT_ACL")
+		regionEndpoint = os.Getenv("REGION_ENDPOINT")
+		forcePathStyle = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
+		sessionToken   = os.Getenv("AWS_SESSION_TOKEN")
+		useDualStack   = os.Getenv("S3_USE_DUALSTACK")
+		accelerate     = os.Getenv("S3_ACCELERATE")
+		logLevel       = os.Getenv("S3_LOGLEVEL")
 	)
 
 	var err error
@@ -93,14 +92,6 @@ func init() {
 		useDualStackBool, err = strconv.ParseBool(useDualStack)
 	}
 
-	multipartCombineSmallPart := true
-	if combineSmallPart != "" {
-		multipartCombineSmallPart, err = strconv.ParseBool(combineSmallPart)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	accelerateBool := true
 	if accelerate != "" {
 		accelerateBool, err = strconv.ParseBool(accelerate)
 	}
@@ -125,7 +116,6 @@ func init() {
 		defaultMultipartCopyChunkSize,
 		defaultMultipartCopyMaxConcurrency,
 		defaultMultipartCopyThresholdSize,
-		multipartCombineSmallPart,
 		rootDirectory,
 		storageClass,
 		driverName + "-test",

From 7fb303e92220310327439b2c0bfb6fdc114e40bf Mon Sep 17 00:00:00 2001
From: Milos Gajdos
Date: Mon, 18 Dec 2023 16:43:54 +0000
Subject: [PATCH 2/2] Update s3.md

Co-authored-by: Cory Snider
Signed-off-by: Milos Gajdos
---
 docs/content/storage-drivers/s3.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/docs/content/storage-drivers/s3.md b/docs/content/storage-drivers/s3.md
index 669ff058..cf393c8f 100644
--- a/docs/content/storage-drivers/s3.md
+++ b/docs/content/storage-drivers/s3.md
@@ -59,9 +59,8 @@ Amazon S3 or S3 compatible services for object storage.
 
 `loglevel`: (optional) Valid values are: `off` (default), `debug`, `debugwithsigning`, `debugwithhttpbody`, `debugwithrequestretries`, `debugwithrequesterrors` and `debugwitheventstreambody`. See the [AWS SDK for Go API reference](https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType) for details.
 
-**NOTE:** Currently the S3 storage driver does not support S3 API compatible storage that
-does not allow combining the last part in the multipart upload into a part that is bigger
-than the preconfigured `chunkSize`.
+**NOTE:** Currently the S3 storage driver only supports S3 API compatible storage that
+allows parts of a multipart upload to vary in size. [Cloudflare R2 is not supported.](https://developers.cloudflare.com/r2/objects/multipart-objects/#limitations)
 
 ## S3 permission scopes
 
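For background, the flush behaviour this series makes unconditional works like this: when the bytes still pending are smaller than the configured chunk size, they are folded into the chunk that is ready to be uploaded, so the part sent to the backend can end up larger than `chunkSize`. The Go sketch below illustrates that combine-small-part step in isolation; it is a simplified stand-in, not the driver's actual code, and `flush`, `uploadPart`, `ready` and `pending` are hypothetical names modelled on the writer fields visible in the diff above.

```go
package main

import "fmt"

// uploadPart stands in for the real S3 UploadPart call; it only reports
// how large each part would be.
func uploadPart(part []byte) {
	fmt.Printf("uploading part of %d bytes\n", len(part))
}

// flush mimics, in simplified form, the combine-small-part behaviour: a
// pending remainder smaller than chunkSize is appended to the chunk that
// is ready to go instead of being sent as an undersized part of its own.
func flush(ready, pending []byte, chunkSize int) {
	buf := append([]byte(nil), ready...)
	if len(pending) > 0 && len(pending) < chunkSize {
		buf = append(buf, pending...) // the combined part now exceeds chunkSize
		pending = nil
	}
	uploadPart(buf)
	if len(pending) > 0 {
		uploadPart(pending)
	}
}

func main() {
	chunkSize := 5 * 1024 * 1024     // S3 requires at least 5 MiB for non-final parts
	ready := make([]byte, chunkSize) // one full chunk buffered and ready to upload
	pending := make([]byte, 1024)    // a 1 KiB tail left over from the last write
	flush(ready, pending, chunkSize) // uploads a single part of chunkSize+1024 bytes
}
```

Backends that require every part of a multipart upload to have the same fixed size cannot accept that enlarged part, which is what the updated NOTE in `docs/content/storage-drivers/s3.md` warns about.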