fix: remove disabling of multipart combine small parts (#4193)
commit 4a360f9da2
3 changed files with 20 additions and 48 deletions

@@ -77,6 +77,9 @@ Amazon S3 or S3 compatible services for object storage.
 
 `loglevel`: (optional) Valid values are: `off` (default), `debug`, `debugwithsigning`, `debugwithhttpbody`, `debugwithrequestretries`, `debugwithrequesterrors` and `debugwitheventstreambody`. See the [AWS SDK for Go API reference](https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType) for details.
 
+**NOTE:** Currently the S3 storage driver only supports S3 API compatible storage that
+allows parts of a multipart upload to vary in size. [Cloudflare R2 is not supported.](https://developers.cloudflare.com/r2/objects/multipart-objects/#limitations)
+
 ## S3 permission scopes
 
 The following AWS policy is required by the registry for push and pull. Make sure to replace `S3_BUCKET_NAME` with the name of your bucket.
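
The note above records why this option existed at all: plain S3 only imposes a 5 MiB minimum on non-final parts, while an R2-style endpoint (per the linked Cloudflare page) additionally requires every part except the last to be the same size, so a part that has absorbed a small neighbor is rejected. A minimal sketch of that difference, with a hypothetical checker that is not part of the driver:

```go
package main

import "fmt"

// minPartSize mirrors S3's documented 5 MiB lower bound for every
// part of a multipart upload except the last one.
const minPartSize = 5 << 20

// acceptableParts is a hypothetical checker, not driver code: it contrasts
// plain S3 semantics (non-final parts only need to meet the minimum size)
// with R2-style semantics (non-final parts must also all be the same size).
func acceptableParts(sizes []int64, requireUniform bool) bool {
	for i, s := range sizes {
		final := i == len(sizes)-1
		if !final && s < minPartSize {
			return false // only the final part may be undersized
		}
		if requireUniform && !final && s != sizes[0] {
			return false // R2-style: non-final parts must be uniform
		}
	}
	return true
}

func main() {
	// A 13 MiB middle part (a 10 MiB chunk that absorbed a 3 MiB tail)
	// is fine for S3 but breaks the R2-style uniformity rule.
	parts := []int64{10 << 20, 13 << 20, 10 << 20}
	fmt.Println(acceptableParts(parts, false)) // true  (S3)
	fmt.Println(acceptableParts(parts, true))  // false (R2-style)
}
```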

@@ -111,7 +111,6 @@ type DriverParameters struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	UserAgent                   string

@@ -165,7 +164,6 @@ type driver struct {
 	MultipartCopyChunkSize      int64
 	MultipartCopyMaxConcurrency int64
 	MultipartCopyThresholdSize  int64
-	MultipartCombineSmallPart   bool
 	RootDirectory               string
 	StorageClass                string
 	ObjectACL                   string

@@ -405,23 +403,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Dr
 		return nil, fmt.Errorf("the useDualStack parameter should be a boolean")
 	}
 
-	mutlipartCombineSmallPart := true
-	combine := parameters["multipartcombinesmallpart"]
-	switch combine := combine.(type) {
-	case string:
-		b, err := strconv.ParseBool(combine)
-		if err != nil {
-			return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-		}
-		mutlipartCombineSmallPart = b
-	case bool:
-		mutlipartCombineSmallPart = combine
-	case nil:
-		// do nothing
-	default:
-		return nil, fmt.Errorf("the multipartcombinesmallpart parameter should be a boolean")
-	}
-
 	sessionToken := ""
 
 	accelerateBool := false
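
The deleted block follows the same shape the driver still uses for its other boolean options (for example `useDualStack` just above): accept a real bool from typed YAML, a string form, or nothing at all, keeping a default. A standalone sketch of that pattern, with a hypothetical `parseBoolParameter` helper that is not part of the driver's API:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseBoolParameter is a hypothetical helper sketching the pattern in the
// removed block: a parameter may arrive as a bool (typed YAML), a string
// (e.g. an environment substitution), or be absent, keeping the default.
func parseBoolParameter(parameters map[string]interface{}, name string, defaultValue bool) (bool, error) {
	switch v := parameters[name].(type) {
	case nil:
		return defaultValue, nil
	case bool:
		return v, nil
	case string:
		b, err := strconv.ParseBool(v)
		if err != nil {
			return false, fmt.Errorf("the %s parameter should be a boolean", name)
		}
		return b, nil
	default:
		return false, fmt.Errorf("the %s parameter should be a boolean", name)
	}
}

func main() {
	params := map[string]interface{}{"usedualstack": "true"}
	ok, err := parseBoolParameter(params, "usedualstack", false)
	fmt.Println(ok, err) // true <nil>
}
```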

@@ -457,7 +438,6 @@ func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Dr
 		multipartCopyChunkSize,
 		multipartCopyMaxConcurrency,
 		multipartCopyThresholdSize,
-		mutlipartCombineSmallPart,
 		fmt.Sprint(rootDirectory),
 		storageClass,
 		fmt.Sprint(userAgent),

@@ -607,7 +587,6 @@ func New(ctx context.Context, params DriverParameters) (*Driver, error) {
 		MultipartCopyChunkSize:      params.MultipartCopyChunkSize,
 		MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
 		MultipartCopyThresholdSize:  params.MultipartCopyThresholdSize,
-		MultipartCombineSmallPart:   params.MultipartCombineSmallPart,
 		RootDirectory:               params.RootDirectory,
 		StorageClass:                params.StorageClass,
 		ObjectACL:                   params.ObjectACL,

@@ -1635,7 +1614,7 @@ func (w *writer) flush() error {
 	}
 
 	buf := bytes.NewBuffer(w.ready.data)
-	if w.driver.MultipartCombineSmallPart && (w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize)) {
+	if w.pending.Len() > 0 && w.pending.Len() < int(w.driver.ChunkSize) {
 		if _, err := buf.Write(w.pending.data); err != nil {
 			return err
 		}
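
This hunk is the behavioral core of the change: the combine step no longer consults a flag, so a pending buffer smaller than one chunk always rides along with the ready data as a single (possibly oversized) part instead of becoming an undersized part of its own. A self-contained sketch of the resulting behavior, using plain byte slices in place of the writer's internal buffers:

```go
package main

import (
	"bytes"
	"fmt"
)

// combineForFlush sketches the now-unconditional logic in flush(): pending
// data that is non-empty but shorter than one chunk is appended to the
// ready data and uploaded with it as one part.
func combineForFlush(ready, pending []byte, chunkSize int) (part, rest []byte) {
	buf := bytes.NewBuffer(ready)
	if len(pending) > 0 && len(pending) < chunkSize {
		buf.Write(pending) // (*bytes.Buffer).Write documents its error as always nil
		pending = nil
	}
	return buf.Bytes(), pending
}

func main() {
	ready := bytes.Repeat([]byte("r"), 8)   // buffered ready data
	pending := bytes.Repeat([]byte("p"), 3) // a small tail, below chunk size
	part, rest := combineForFlush(ready, pending, 5)
	fmt.Println(len(part), len(rest)) // 11 0: the tail was folded in
}
```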

@@ -29,23 +29,22 @@ var (
 
 func init() {
 	var (
 		accessKey      = os.Getenv("AWS_ACCESS_KEY")
 		secretKey      = os.Getenv("AWS_SECRET_KEY")
 		bucket         = os.Getenv("S3_BUCKET")
 		encrypt        = os.Getenv("S3_ENCRYPT")
 		keyID          = os.Getenv("S3_KEY_ID")
 		secure         = os.Getenv("S3_SECURE")
 		skipVerify     = os.Getenv("S3_SKIP_VERIFY")
 		v4Auth         = os.Getenv("S3_V4_AUTH")
 		region         = os.Getenv("AWS_REGION")
 		objectACL      = os.Getenv("S3_OBJECT_ACL")
 		regionEndpoint = os.Getenv("REGION_ENDPOINT")
 		forcePathStyle = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
 		sessionToken   = os.Getenv("AWS_SESSION_TOKEN")
 		useDualStack   = os.Getenv("S3_USE_DUALSTACK")
-		combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
 		accelerate     = os.Getenv("S3_ACCELERATE")
 		logLevel       = os.Getenv("S3_LOGLEVEL")
 	)
 
 	var err error

@@ -94,14 +93,6 @@ func init() {
 		useDualStackBool, err = strconv.ParseBool(useDualStack)
 	}
 
-	multipartCombineSmallPart := true
-	if combineSmallPart != "" {
-		multipartCombineSmallPart, err = strconv.ParseBool(combineSmallPart)
-		if err != nil {
-			return nil, err
-		}
-	}
-
 	accelerateBool := true
 	if accelerate != "" {
 		accelerateBool, err = strconv.ParseBool(accelerate)

@@ -126,7 +117,6 @@ func init() {
 		defaultMultipartCopyChunkSize,
 		defaultMultipartCopyMaxConcurrency,
 		defaultMultipartCopyThresholdSize,
-		multipartCombineSmallPart,
 		rootDirectory,
 		storageClass,
 		driverName + "-test",