forked from TrueCloudLab/distribution

commit bda79219b2
parent 91b0f0559e

Add code to handle pagination of parts. Fixes max layer size of 10GB bug

Signed-off-by: Jack Baines <jack.baines@uk.ibm.com>

1 changed file with 16 additions and 7 deletions
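Background on the bug: S3's ListParts call returns at most 1,000 parts per response. The driver resumed appendable uploads from the first response only, so any parts beyond the first 1,000 were silently dropped when the upload was completed; with the driver's default part size of roughly 10MB, that works out to the 10GB ceiling named in the title. The fix below follows NextPartNumberMarker until IsTruncated is false, collecting every page of parts.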
@@ -549,9 +549,9 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
 
 // Writer returns a FileWriter which will store the content written to it
 // at the location designated by "path" after the call to Commit.
-func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
+func (d *driver) Writer(ctx context.Context, path string, appendParam bool) (storagedriver.FileWriter, error) {
 	key := d.s3Path(path)
-	if !append {
+	if !appendParam {
 		// TODO (brianbland): cancel other uploads at this path
 		resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
 			Bucket: aws.String(d.Bucket),
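A note on this hunk's rename: a parameter named append shadows Go's builtin append inside the function body, so the accumulation added later in this commit (allParts = append(allParts, resp.Parts...)) would not compile until the parameter became appendParam.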
@@ -574,7 +574,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
 	if err != nil {
 		return nil, parseError(path, err)
 	}
-
+	var allParts []*s3.Part
 	for _, multi := range resp.Uploads {
 		if key != *multi.Key {
 			continue
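The allParts slice is declared ahead of the loop over in-progress uploads so that the pagination loop in the next hunk has a single accumulator that survives repeated ListParts calls.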
@ -587,11 +587,20 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, parseError(path, err)
|
return nil, parseError(path, err)
|
||||||
}
|
}
|
||||||
var multiSize int64
|
allParts = append(allParts, resp.Parts...)
|
||||||
for _, part := range resp.Parts {
|
for *resp.IsTruncated {
|
||||||
multiSize += *part.Size
|
resp, err = d.S3.ListParts(&s3.ListPartsInput{
|
||||||
|
Bucket: aws.String(d.Bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
UploadId: multi.UploadId,
|
||||||
|
PartNumberMarker: resp.NextPartNumberMarker,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, parseError(path, err)
|
||||||
}
|
}
|
||||||
return d.newWriter(key, *multi.UploadId, resp.Parts), nil
|
allParts = append(allParts, resp.Parts...)
|
||||||
|
}
|
||||||
|
return d.newWriter(key, *multi.UploadId, allParts), nil
|
||||||
}
|
}
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
||||||
}
|
}
|
||||||
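For reference, a minimal standalone sketch of the same pagination pattern against aws-sdk-go's s3 package. The listAllParts helper, bucket, key, and upload ID are placeholders for illustration, not part of the driver:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllParts pages through ListParts until the listing is no longer
// truncated, mirroring the loop this commit adds to the driver's Writer.
// Each ListParts response carries at most 1,000 parts.
func listAllParts(svc *s3.S3, bucket, key string, uploadID *string) ([]*s3.Part, error) {
	resp, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: uploadID,
	})
	if err != nil {
		return nil, err
	}
	allParts := resp.Parts
	// Like the driver code, this assumes IsTruncated is always set on the
	// response; a nil check would make it more defensive.
	for *resp.IsTruncated {
		resp, err = svc.ListParts(&s3.ListPartsInput{
			Bucket:           aws.String(bucket),
			Key:              aws.String(key),
			UploadId:         uploadID,
			PartNumberMarker: resp.NextPartNumberMarker,
		})
		if err != nil {
			return nil, err
		}
		allParts = append(allParts, resp.Parts...)
	}
	return allParts, nil
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Placeholder values; a real caller would pass the UploadId returned
	// by CreateMultipartUpload for the object being resumed.
	parts, err := listAllParts(svc, "example-bucket", "example/key", aws.String("example-upload-id"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("collected parts:", len(parts))
}

Seeding allParts with the first page before entering the loop keeps uploads of fewer than 1,000 parts on the old fast path: for them the loop body never runs.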