forked from TrueCloudLab/distribution
fix: remove disabling of multipart combine small parts
This reverts https://github.com/distribution/distribution/pull/3556. This feature is currently broken and requires more fundamental changes in the S3 driver. Until then, it's better to remove it.

Signed-off-by: Milos Gajdos <milosthegajdos@gmail.com>
parent 79ef555f8a
commit 7ba91015f5
3 changed files with 21 additions and 48 deletions
@@ -59,6 +59,10 @@ Amazon S3 or S3 compatible services for object storage.
 
 `loglevel`: (optional) Valid values are: `off` (default), `debug`, `debugwithsigning`, `debugwithhttpbody`, `debugwithrequestretries`, `debugwithrequesterrors` and `debugwitheventstreambody`. See the [AWS SDK for Go API reference](https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType) for details.
 
+**NOTE:** Currently the S3 storage driver does not support S3 API compatible storage that
+does not allow combining the last part in the multipart upload into a part that is bigger
+than the preconfigured `chunkSize`.
+
 ## S3 permission scopes
 
 The following AWS policy is required by the registry for push and pull. Make sure to replace `S3_BUCKET_NAME` with the name of your bucket.
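As an aside on the `loglevel` values documented above: they correspond to the AWS SDK for Go's `aws.LogLevelType` constants. Below is a minimal, illustrative sketch of that mapping; the helper name `parseLogLevel` is an assumption for this example, not the driver's actual code.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// parseLogLevel is a hypothetical helper (not driver code) mapping the
// documented loglevel strings onto aws-sdk-go's LogLevelType constants.
// Unrecognized values fall back to logging disabled.
func parseLogLevel(s string) aws.LogLevelType {
	switch s {
	case "debug":
		return aws.LogDebug
	case "debugwithsigning":
		return aws.LogDebugWithSigning
	case "debugwithhttpbody":
		return aws.LogDebugWithHTTPBody
	case "debugwithrequestretries":
		return aws.LogDebugWithRequestRetries
	case "debugwithrequesterrors":
		return aws.LogDebugWithRequestErrors
	case "debugwitheventstreambody":
		return aws.LogDebugWithEventStreamBody
	default: // "off" or anything unrecognized
		return aws.LogOff
	}
}

func main() {
	// A level like this is typically set on the SDK configuration,
	// e.g. aws.Config{LogLevel: aws.LogLevel(level)}.
	level := parseLogLevel("debugwithsigning")
	fmt.Printf("log level bitmask: %d\n", level)
}
```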
@@ -111,7 +111,6 @@ type DriverParameters struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	UserAgent                   string
@@ -165,7 +164,6 @@ type driver struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	ObjectACL                   string
@@ -405,23 +403,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Driver, error) {
 		return nil, fmt.Errorf("the useDualStack parameter should be a boolean")
 	}
 
-	mutlipartCombineSmallPart := true
-	combine := parameters["multipartcombinesmallpart"]
-	switch combine := combine.(type) {
-	case string:
-		b, err := strconv.ParseBool(combine)
-		if err != nil {
-			return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-		}
-		mutlipartCombineSmallPart = b
-	case bool:
-		mutlipartCombineSmallPart = combine
-	case nil:
-		// do nothing
-	default:
-		return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-	}
-
 	sessionToken := ""
 
 	accelerateBool := false
@@ -457,7 +438,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Driver, error) {
 		multipartCopyChunkSize,
 		multipartCopyMaxConcurrency,
 		multipartCopyThresholdSize,
-		mutlipartCombineSmallPart,
 		fmt.Sprint(rootDirectory),
 		storageClass,
 		fmt.Sprint(userAgent),
@@ -608,7 +588,6 @@ func New(ctx context.Context, params DriverParameters) (*Driver, error) {
 		MultipartCopyChunkSize:      params.MultipartCopyChunkSize,
 		MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
 		MultipartCopyThresholdSize:  params.MultipartCopyThresholdSize,
-		MultipartCombineSmallPart:   params.MultipartCombineSmallPart,
 		RootDirectory:               params.RootDirectory,
 		StorageClass:                params.StorageClass,
 		ObjectACL:                   params.ObjectACL,
@@ -1636,7 +1615,7 @@ func (w *writer) flush() error {
 	}
 
 	buf := bytes.NewBuffer(w.ready.data)
-	if w.driver.MultipartCombineSmallPart && (w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize)) {
+	if w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize) {
 		if _, err := buf.Write(w.pending.data); err != nil {
 			return err
 		}
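To make the effect of the flush() change above concrete: with the `MultipartCombineSmallPart` gate removed, a pending buffer smaller than `chunkSize` is always folded into the part being uploaded, so the final part can approach twice `chunkSize`. This is what the new docs NOTE about S3 API compatible storage refers to. A minimal sketch of that arithmetic under assumed sizes (not driver code):

```go
package main

import "fmt"

// combinedPartSize illustrates the part-size arithmetic behind the flush()
// change: when the pending buffer holds fewer bytes than chunkSize, it is
// merged into the ready buffer, so the uploaded part can exceed chunkSize.
func combinedPartSize(readyLen, pendingLen, chunkSize int) int {
	if pendingLen > 0 && pendingLen < chunkSize {
		return readyLen + pendingLen // small tail merged into the current part
	}
	return readyLen // tail is large enough to become its own part
}

func main() {
	const chunkSize = 10 * 1024 * 1024 // assume a 10 MiB chunkSize
	// A ready chunk of 10 MiB plus a 5 MiB tail is uploaded as one 15 MiB part,
	// which is why backends that cap part sizes at chunkSize are not supported.
	fmt.Println(combinedPartSize(10*1024*1024, 5*1024*1024, chunkSize))
}
```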
@@ -42,7 +42,6 @@ func init() {
 		forcePathStyle = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
 		sessionToken = os.Getenv("AWS_SESSION_TOKEN")
 		useDualStack = os.Getenv("S3_USE_DUALSTACK")
-		combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
 		accelerate = os.Getenv("S3_ACCELERATE")
 		logLevel = os.Getenv("S3_LOGLEVEL")
 	)
@@ -93,14 +92,6 @@ func init() {
 			useDualStackBool, err = strconv.ParseBool(useDualStack)
 		}
 
-		multipartCombineSmallPart := true
-		if combineSmallPart != "" {
-			multipartCombineSmallPart, err = strconv.ParseBool(combineSmallPart)
-			if err != nil {
-				return nil, err
-			}
-		}
-
 		accelerateBool := true
 		if accelerate != "" {
 			accelerateBool, err = strconv.ParseBool(accelerate)
@@ -125,7 +116,6 @@ func init() {
 			defaultMultipartCopyChunkSize,
 			defaultMultipartCopyMaxConcurrency,
 			defaultMultipartCopyThresholdSize,
-			multipartCombineSmallPart,
 			rootDirectory,
 			storageClass,
 			driverName + "-test",