fix: remove disabling of multipart combine small parts

This reverts https://github.com/distribution/distribution/pull/3556

This feature is currently broken and requires more fundamental changes
in the S3 driver. Until then it's better to remove it.

Signed-off-by: Milos Gajdos <milosthegajdos@gmail.com>
commit 7ba91015f5
parent 79ef555f8a
Author: Milos Gajdos
Date:   2023-12-18 09:52:19 +00:00

3 changed files with 21 additions and 48 deletions

@@ -59,6 +59,10 @@ Amazon S3 or S3 compatible services for object storage.
 `loglevel`: (optional) Valid values are: `off` (default), `debug`, `debugwithsigning`, `debugwithhttpbody`, `debugwithrequestretries`, `debugwithrequesterrors` and `debugwitheventstreambody`. See the [AWS SDK for Go API reference](https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType) for details.
+
+**NOTE:** Currently the S3 storage driver does not support S3 API compatible storage that
+does not allow combining the last part in the multipart upload into a part that is bigger
+than the preconfigured `chunkSize`.
 ## S3 permission scopes
 The following AWS policy is required by the registry for push and pull. Make sure to replace `S3_BUCKET_NAME` with the name of your bucket.
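For context on how these documented keys reach the driver, the sketch below constructs the storage driver through the `FromParameters` entry point that appears later in this diff. The import path, package alias, and parameter values are assumptions for illustration, not taken from this commit; after this change a `multipartcombinesmallpart` key would simply go unread.

```go
package main

import (
	"context"
	"log"

	// Import path assumed from the distribution/distribution v3 repository
	// layout; verify against the version you vendor.
	s3 "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
)

func main() {
	// Keys mirror the documented configuration parameters; the values are
	// placeholders. After this commit, a "multipartcombinesmallpart" key is
	// no longer recognized and would simply go unread.
	params := map[string]interface{}{
		"region":    "us-east-1",
		"bucket":    "my-registry-bucket",
		"chunksize": 10 * 1024 * 1024, // parts smaller than 5 MiB are rejected by S3
	}

	driver, err := s3.FromParameters(context.Background(), params)
	if err != nil {
		log.Fatalf("constructing s3 driver: %v", err)
	}
	_ = driver // hand off to the registry's storage layer
}
```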

@@ -111,7 +111,6 @@ type DriverParameters struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	UserAgent                   string
@@ -165,7 +164,6 @@ type driver struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	ObjectACL                   string
@@ -405,23 +403,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Driver, error) {
 		return nil, fmt.Errorf("the useDualStack parameter should be a boolean")
 	}
 
-	mutlipartCombineSmallPart := true
-	combine := parameters["multipartcombinesmallpart"]
-	switch combine := combine.(type) {
-	case string:
-		b, err := strconv.ParseBool(combine)
-		if err != nil {
-			return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-		}
-		mutlipartCombineSmallPart = b
-	case bool:
-		mutlipartCombineSmallPart = combine
-	case nil:
-		// do nothing
-	default:
-		return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-	}
-
 	sessionToken := ""
 
 	accelerateBool := false
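The block removed above followed the driver's usual convention for boolean options, which can arrive as a native bool or as a string depending on how the registry was configured. As a standalone illustration of that convention, here is a hypothetical helper (not part of the driver) performing the same normalization:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseBoolParameter is a hypothetical helper (not part of the driver) that
// mirrors the normalization the removed block performed: a config value may
// arrive as a native bool, as a string such as "true", or be absent entirely.
func parseBoolParameter(parameters map[string]interface{}, name string, defaultValue bool) (bool, error) {
	switch v := parameters[name].(type) {
	case string:
		b, err := strconv.ParseBool(v)
		if err != nil {
			return false, fmt.Errorf("the %s parameter should be a boolean", name)
		}
		return b, nil
	case bool:
		return v, nil
	case nil:
		return defaultValue, nil // parameter not set: keep the default
	default:
		return false, fmt.Errorf("the %s parameter should be a boolean", name)
	}
}

func main() {
	v, err := parseBoolParameter(map[string]interface{}{"secure": "true"}, "secure", false)
	fmt.Println(v, err) // true <nil>
}
```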
@@ -457,7 +438,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Driver, error) {
 		multipartCopyChunkSize,
 		multipartCopyMaxConcurrency,
 		multipartCopyThresholdSize,
-		mutlipartCombineSmallPart,
 		fmt.Sprint(rootDirectory),
 		storageClass,
 		fmt.Sprint(userAgent),
@@ -608,7 +588,6 @@ func New(ctx context.Context, params DriverParameters) (*Driver, error) {
 		MultipartCopyChunkSize:      params.MultipartCopyChunkSize,
 		MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
 		MultipartCopyThresholdSize:  params.MultipartCopyThresholdSize,
-		MultipartCombineSmallPart:   params.MultipartCombineSmallPart,
 		RootDirectory:               params.RootDirectory,
 		StorageClass:                params.StorageClass,
 		ObjectACL:                   params.ObjectACL,
@@ -1636,7 +1615,7 @@ func (w *writer) flush() error {
 	}
 
 	buf := bytes.NewBuffer(w.ready.data)
-	if w.driver.MultipartCombineSmallPart && (w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize)) {
+	if w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize) {
 		if _, err := buf.Write(w.pending.data); err != nil {
 			return err
 		}
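The restored condition unconditionally folds a pending buffer smaller than `ChunkSize` into the part being flushed. S3's multipart API requires every part except the last to be at least 5 MiB, so letting the removed flag skip the combine risked producing undersized intermediate parts, which is likely the breakage the commit message refers to. A minimal sketch of the sizing rule, with illustrative names rather than the driver's types:

```go
package main

import "fmt"

// s3MinPartSize is the AWS-documented minimum size (5 MiB) for every
// multipart-upload part except the final one.
const s3MinPartSize = 5 * 1024 * 1024

// combineSmallPart is an illustrative stand-in (not driver code) for the
// restored check: leftover bytes too small to stand as their own part are
// merged into the part currently being flushed.
func combineSmallPart(pendingLen, chunkSize int) bool {
	return pendingLen > 0 && pendingLen < chunkSize
}

func main() {
	chunkSize := 10 * 1024 * 1024 // example chunk size (10 MiB)
	pending := 3 * 1024 * 1024    // 3 MiB leftover, below s3MinPartSize
	fmt.Println(combineSmallPart(pending, chunkSize)) // true: fold into this part
}
```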

@@ -28,23 +28,22 @@ var (
 func init() {
 	var (
 		accessKey        = os.Getenv("AWS_ACCESS_KEY")
 		secretKey        = os.Getenv("AWS_SECRET_KEY")
 		bucket           = os.Getenv("S3_BUCKET")
 		encrypt          = os.Getenv("S3_ENCRYPT")
 		keyID            = os.Getenv("S3_KEY_ID")
 		secure           = os.Getenv("S3_SECURE")
 		skipVerify       = os.Getenv("S3_SKIP_VERIFY")
 		v4Auth           = os.Getenv("S3_V4_AUTH")
 		region           = os.Getenv("AWS_REGION")
 		objectACL        = os.Getenv("S3_OBJECT_ACL")
 		regionEndpoint   = os.Getenv("REGION_ENDPOINT")
 		forcePathStyle   = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
 		sessionToken     = os.Getenv("AWS_SESSION_TOKEN")
 		useDualStack     = os.Getenv("S3_USE_DUALSTACK")
-		combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
 		accelerate       = os.Getenv("S3_ACCELERATE")
 		logLevel         = os.Getenv("S3_LOGLEVEL")
 	)
 
 	var err error
@@ -93,14 +92,6 @@ func init() {
 		useDualStackBool, err = strconv.ParseBool(useDualStack)
 	}
 
-	multipartCombineSmallPart := true
-	if combineSmallPart != "" {
-		multipartCombineSmallPart, err = strconv.ParseBool(combineSmallPart)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	accelerateBool := true
 	if accelerate != "" {
 		accelerateBool, err = strconv.ParseBool(accelerate)
@@ -125,7 +116,6 @@ func init() {
 			defaultMultipartCopyChunkSize,
 			defaultMultipartCopyMaxConcurrency,
 			defaultMultipartCopyThresholdSize,
-			multipartCombineSmallPart,
 			rootDirectory,
 			storageClass,
 			driverName + "-test",