package layer

import (
	"context"
	"encoding/hex"
	stderrors "errors"
	"fmt"
	"io"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/internal/misc"
	"github.com/nspcc-dev/neofs-sdk-go/user"
	"go.uber.org/zap"
)
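
// Attribute names, key prefixes and size/count limits used by the multipart
// upload flow.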
const (
	UploadIDAttributeName         = "S3-Upload-Id"
	UploadPartNumberAttributeName = "S3-Upload-Part-Number"
	UploadKeyAttributeName        = "S3-Upload-Key"
	UploadCompletedParts          = "S3-Completed-Parts"
	UploadPartKeyPrefix           = ".upload-"

	metaPrefix = "meta-"
	aclPrefix  = "acl-"

	MaxSizeUploadsList  = 1000
	MaxSizePartsList    = 1000
	UploadMinPartNumber = 1
	UploadMaxPartNumber = 10000
	uploadMinSize       = 5 * 1048576    // 5MB
	uploadMaxSize       = 5 * 1073741824 // 5GB
)
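
// Parameter and result types of the multipart upload operations.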
type (
	UploadInfoParams struct {
		UploadID string
		Bkt      *data.BucketInfo
		Key      string
	}

	CreateMultipartParams struct {
		Info       *UploadInfoParams
		Header     map[string]string
		TagSet     map[string]string
		ACLHeaders map[string]string
	}

	UploadPartParams struct {
		Info       *UploadInfoParams
		PartNumber int
		Size       int64
		Reader     io.Reader
	}

	UploadCopyParams struct {
		Info       *UploadInfoParams
		SrcObjInfo *data.ObjectInfo
		SrcBktInfo *data.BucketInfo
		PartNumber int
		Range      *RangeParams
	}

	CompleteMultipartParams struct {
		Info  *UploadInfoParams
		Parts []*CompletedPart
	}

	CompletedPart struct {
		ETag       string
		PartNumber int
	}

	Part struct {
		ETag         string
		LastModified string
		PartNumber   int
		Size         int64
	}

	ListMultipartUploadsParams struct {
		Bkt            *data.BucketInfo
		Delimiter      string
		EncodingType   string
		KeyMarker      string
		MaxUploads     int
		Prefix         string
		UploadIDMarker string
	}

	ListPartsParams struct {
		Info             *UploadInfoParams
		MaxParts         int
		PartNumberMarker int
	}

	ListPartsInfo struct {
		Parts                []*Part
		Owner                user.ID
		NextPartNumberMarker int
		IsTruncated          bool
	}

	ListMultipartUploadsInfo struct {
		Prefixes           []string
		Uploads            []*UploadInfo
		IsTruncated        bool
		NextKeyMarker      string
		NextUploadIDMarker string
	}

	UploadInfo struct {
		IsDir    bool
		Key      string
		UploadID string
		Owner    user.ID
		Created  time.Time
	}
)
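
// CreateMultipartUpload initializes a multipart upload: the request headers,
// ACL headers and tag set are merged into a single prefixed meta map, and the
// upload record is stored via the tree service.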
func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) error {
	info := &data.MultipartInfo{
		Key:      p.Info.Key,
		UploadID: p.Info.UploadID,
		Owner:    n.Owner(ctx),
		Created:  time.Now(),
		Meta:     make(map[string]string, len(p.Header)+len(p.ACLHeaders)+len(p.TagSet)),
	}

	for key, val := range p.Header {
		info.Meta[metaPrefix+key] = val
	}

	for key, val := range p.ACLHeaders {
		info.Meta[aclPrefix+key] = val
	}

	for key, val := range p.TagSet {
		info.Meta[tagPrefix+key] = val
	}

	return n.treeService.CreateMultipartUpload(ctx, &p.Info.Bkt.CID, info)
}
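
// UploadPart checks that the target multipart upload exists and that the part
// does not exceed uploadMaxSize, uploads the payload and returns the hex-encoded
// hash sum of the stored part (used as its ETag).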
func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, &p.Info.Bkt.CID, p.Info.Key, p.Info.UploadID)
	if err != nil {
		if stderrors.Is(err, ErrNodeNotFound) {
			return "", errors.GetAPIError(errors.ErrNoSuchUpload)
		}
		return "", err
	}

	if p.Size > uploadMaxSize {
		return "", errors.GetAPIError(errors.ErrEntityTooLarge)
	}

	objInfo, err := n.uploadPart(ctx, multipartInfo, p)
	if err != nil {
		return "", err
	}

	return objInfo.HashSum, nil
}
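
// uploadPart stores a single part object tagged with the upload ID and part
// number attributes, registers it in the tree service (deleting the object a
// previous attempt left under the same part number, if any) and caches the
// resulting object info.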
func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
	bktInfo := p.Info.Bkt
	prm := PrmObjectCreate{
		Container:  bktInfo.CID,
		Creator:    bktInfo.Owner,
		Attributes: make([][2]string, 2),
		Payload:    p.Reader,
	}

	prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
	prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)

	id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
	if err != nil {
		return nil, err
	}

	partInfo := &data.PartInfo{
		Key:      p.Info.Key,
		UploadID: p.Info.UploadID,
		Number:   p.PartNumber,
		OID:      *id,
	}

	oldPartID, err := n.treeService.AddPart(ctx, &bktInfo.CID, multipartInfo.ID, partInfo)
	if err != nil {
		return nil, err
	}
	if oldPartID != nil {
		if err = n.objectDelete(ctx, bktInfo, *oldPartID); err != nil {
			n.log.Error("couldn't delete old part object", zap.Error(err),
				zap.String("cnrID", bktInfo.CID.EncodeToString()),
				zap.String("bucket name", bktInfo.Name),
				zap.String("objID", oldPartID.EncodeToString()))
		}
	}

	objInfo := &data.ObjectInfo{
		ID:  *id,
		CID: bktInfo.CID,

		Owner:   bktInfo.Owner,
		Bucket:  bktInfo.Name,
		Size:    p.Size,
		Created: time.Now(),
		HashSum: hex.EncodeToString(hash),
	}

	if err = n.objCache.PutObject(objInfo); err != nil {
		n.log.Error("couldn't cache system object", zap.Error(err))
	}

	return objInfo, nil
}
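
// UploadPartCopy copies a (possibly ranged) source object into a new part of
// an existing multipart upload. The source payload is streamed through a pipe:
// GetObject writes into one end in a goroutine while uploadPart consumes the
// other.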
func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, &p.Info.Bkt.CID, p.Info.Key, p.Info.UploadID)
	if err != nil {
		if stderrors.Is(err, ErrNodeNotFound) {
			return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
		}
		return nil, err
	}

	size := p.SrcObjInfo.Size
	if p.Range != nil {
		size = int64(p.Range.End - p.Range.Start + 1)
		if p.Range.End > uint64(p.SrcObjInfo.Size) {
			return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
		}
	}
	if size > uploadMaxSize {
		return nil, errors.GetAPIError(errors.ErrEntityTooLarge)
	}

	metadata := make(map[string]string)
	appendUploadHeaders(metadata, p.Info.UploadID, p.Info.Key, p.PartNumber)

	pr, pw := io.Pipe()

	go func() {
		err = n.GetObject(ctx, &GetObjectParams{
			ObjectInfo: p.SrcObjInfo,
			Writer:     pw,
			Range:      p.Range,
			BucketInfo: p.SrcBktInfo,
		})

		if err = pw.CloseWithError(err); err != nil {
			n.log.Error("could not get object", zap.Error(err))
		}
	}()

	params := &UploadPartParams{
		Info:       p.Info,
		PartNumber: p.PartNumber,
		Size:       size,
		Reader:     pr,
	}

	return n.uploadPart(ctx, multipartInfo, params)
}

// multiObjectReader implements io.Reader over the payloads of a list of
// objects stored in the NeoFS network, reading them sequentially.
type multiObjectReader struct {
	ctx context.Context

	layer *layer

	prm getParams

	curReader io.Reader

	parts []*data.ObjectInfo
}
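
// Read drains the current part's reader first; on its EOF it initializes a
// payload reader for the next part and recurses until the buffer is filled or
// all parts are exhausted.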
func (x *multiObjectReader) Read(p []byte) (n int, err error) {
	if x.curReader != nil {
		n, err = x.curReader.Read(p)
		if !stderrors.Is(err, io.EOF) {
			return n, err
		}
	}

	if len(x.parts) == 0 {
		return n, io.EOF
	}

	x.prm.objInfo = x.parts[0]

	x.curReader, err = x.layer.initObjectPayloadReader(x.ctx, x.prm)
	if err != nil {
		return n, fmt.Errorf("init payload reader for the next part: %w", err)
	}

	x.parts = x.parts[1:]

	next, err := x.Read(p[n:])

	return n + next, err
}
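
// CompleteMultipartUpload validates the client's part list against the
// uploaded part objects, concatenates the parts into the final object via
// multiObjectReader and PutObject, and finally deletes the now-redundant part
// objects.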
func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*data.ObjectInfo, error) {
	var (
		obj            *data.ObjectInfo
		partsAttrValue string
	)

	for i := 1; i < len(p.Parts); i++ {
		if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
			return nil, errors.GetAPIError(errors.ErrInvalidPartOrder)
		}
	}

	objects, err := n.getUploadParts(ctx, p.Info)
	if err != nil {
		return nil, err
	}

	if len(objects) == 1 {
		obj, err = n.headLastVersionIfNotDeleted(ctx, p.Info.Bkt, p.Info.Key)
		if err != nil {
			if errors.IsS3Error(err, errors.ErrNoSuchKey) {
				return nil, errors.GetAPIError(errors.ErrInvalidPart)
			}
			return nil, err
		}
		if obj != nil && obj.Headers[UploadIDAttributeName] == p.Info.UploadID {
			return obj, nil
		}
		return nil, errors.GetAPIError(errors.ErrInvalidPart)
	}

	if _, ok := objects[0]; !ok {
		n.log.Error("could not get init multipart upload",
			zap.Stringer("bucket id", p.Info.Bkt.CID),
			zap.String("uploadID", misc.SanitizeString(p.Info.UploadID)),
			zap.String("uploadKey", p.Info.Key),
		)
		// We return InternalError because reaching this point means the handler
		// already fetched the init part successfully, so failing to get it
		// again is unexpected.
		return nil, errors.GetAPIError(errors.ErrInternalError)
	}

	// Keep in mind that objects[0] is the init part.
	if len(objects) <= len(p.Parts) {
		return nil, errors.GetAPIError(errors.ErrInvalidPart)
	}

	parts := make([]*data.ObjectInfo, 0, len(p.Parts))

	for i, part := range p.Parts {
		info := objects[part.PartNumber]
		if info == nil || part.ETag != info.HashSum {
			return nil, errors.GetAPIError(errors.ErrInvalidPart)
		}
		// The last part is exempt from the minimum size limit.
		if i != len(p.Parts)-1 && info.Size < uploadMinSize {
			return nil, errors.GetAPIError(errors.ErrEntityTooSmall)
		}
		parts = append(parts, info)
		partsAttrValue += strconv.Itoa(part.PartNumber) + "=" + strconv.FormatInt(info.Size, 10) + ","
	}

	initMetadata := objects[0].Headers
	if len(objects[0].ContentType) != 0 {
		initMetadata[api.ContentType] = objects[0].ContentType
	}

	/* We keep the "S3-Upload-Id" attribute in the completed object to be able to
	tell a "common" object from a completed one. We need to distinguish them if
	something goes wrong during the completion of a multipart upload, i.e. the
	object was completed but tagging/ACL was not set for some reason. */
	delete(initMetadata, UploadPartNumberAttributeName)
	delete(initMetadata, UploadKeyAttributeName)
	delete(initMetadata, attrVersionsIgnore)
	delete(initMetadata, objectSystemAttributeName)
	delete(initMetadata, versionsUnversionedAttr)

	initMetadata[UploadCompletedParts] = partsAttrValue[:len(partsAttrValue)-1]

	r := &multiObjectReader{
		ctx:   ctx,
		layer: n,
		parts: parts,
	}

	r.prm.bktInfo = p.Info.Bkt

	obj, err = n.PutObject(ctx, &PutObjectParams{
		BktInfo: p.Info.Bkt,
		Object:  p.Info.Key,
		Reader:  r,
		Header:  initMetadata,
	})
	if err != nil {
		n.log.Error("could not put a completed object (multipart upload)",
			zap.String("uploadID", misc.SanitizeString(p.Info.UploadID)),
			zap.String("uploadKey", p.Info.Key),
			zap.Error(err))

		return nil, errors.GetAPIError(errors.ErrInternalError)
	}

	for partNum, objInfo := range objects {
		if partNum == 0 {
			continue
		}
		if err = n.objectDelete(ctx, p.Info.Bkt, objInfo.ID); err != nil {
			n.log.Warn("could not delete upload part",
				zap.Stringer("object id", objInfo.ID),
				zap.Stringer("bucket id", p.Info.Bkt.CID),
				zap.Error(err))
		}
		n.systemCache.Delete(systemObjectKey(p.Info.Bkt, FormUploadPartName(p.Info.UploadID, p.Info.Key, partNum)))
	}

	return obj, nil
}
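
// ListMultipartUploads lists the in-progress multipart uploads of a bucket,
// collapsing keys into common prefixes by delimiter and applying the
// key/upload-ID markers and MaxUploads truncation of the S3 API.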
func (n *layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUploadsParams) (*ListMultipartUploadsInfo, error) {
	var result ListMultipartUploadsInfo
	if p.MaxUploads == 0 {
		return &result, nil
	}

	multipartInfos, err := n.treeService.GetMultipartUploadsByPrefix(ctx, &p.Bkt.CID, p.Prefix)
	if err != nil {
		return nil, err
	}

	uploads := make([]*UploadInfo, 0, len(multipartInfos))
	uniqDirs := make(map[string]struct{})

	for _, multipartInfo := range multipartInfos {
		info := uploadInfoFromMultipartInfo(multipartInfo, p.Prefix, p.Delimiter)
		if info != nil {
			if info.IsDir {
				if _, ok := uniqDirs[info.Key]; ok {
					continue
				}
				uniqDirs[info.Key] = struct{}{}
			}
			uploads = append(uploads, info)
		}
	}

	sort.Slice(uploads, func(i, j int) bool {
		if uploads[i].Key == uploads[j].Key {
			return uploads[i].UploadID < uploads[j].UploadID
		}
		return uploads[i].Key < uploads[j].Key
	})

	if p.KeyMarker != "" {
		if p.UploadIDMarker != "" {
			uploads = trimAfterUploadIDAndKey(p.KeyMarker, p.UploadIDMarker, uploads)
		} else {
			uploads = trimAfterUploadKey(p.KeyMarker, uploads)
		}
	}

	if len(uploads) > p.MaxUploads {
		result.IsTruncated = true
		uploads = uploads[:p.MaxUploads]
		result.NextUploadIDMarker = uploads[len(uploads)-1].UploadID
		result.NextKeyMarker = uploads[len(uploads)-1].Key
	}

	for _, ov := range uploads {
		if ov.IsDir {
			result.Prefixes = append(result.Prefixes, ov.Key)
		} else {
			result.Uploads = append(result.Uploads, ov)
		}
	}

	return &result, nil
}
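
// AbortMultipartUpload deletes every object that belongs to the given upload,
// including the init part.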
func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) error {
	objects, err := n.getUploadParts(ctx, p)
	if err != nil {
		return err
	}

	for _, info := range objects {
		if err = n.objectDelete(ctx, p.Bkt, info.ID); err != nil {
			return err
		}
	}

	return nil
}
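
// ListParts returns the parts of an upload sorted by part number, starting
// after PartNumberMarker and truncated to MaxParts. The init part (number 0)
// is skipped.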
func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error) {
	var res ListPartsInfo
	objs, err := n.getUploadParts(ctx, p.Info)
	if err != nil {
		return nil, err
	}

	res.Owner = objs[0].Owner

	parts := make([]*Part, 0, len(objs))

	for num, objInfo := range objs {
		if num == 0 {
			continue
		}
		parts = append(parts, &Part{
			ETag:         objInfo.HashSum,
			LastModified: objInfo.Created.UTC().Format(time.RFC3339),
			PartNumber:   num,
			Size:         objInfo.Size,
		})
	}

	sort.Slice(parts, func(i, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})

	if p.PartNumberMarker != 0 {
		for i, part := range parts {
			if part.PartNumber > p.PartNumberMarker {
				parts = parts[i:]
				break
			}
		}
	}

	if len(parts) > p.MaxParts {
		res.IsTruncated = true
		res.NextPartNumberMarker = parts[p.MaxParts-1].PartNumber
		parts = parts[:p.MaxParts]
	}

	res.Parts = parts

	return &res, nil
}
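
// GetUploadInitInfo returns the object info of the init part (part number 0)
// of an upload, translating a missing key into ErrNoSuchUpload.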
func (n *layer) GetUploadInitInfo(ctx context.Context, p *UploadInfoParams) (*data.ObjectInfo, error) {
	info, err := n.HeadSystemObject(ctx, p.Bkt, FormUploadPartName(p.UploadID, p.Key, 0))
	if err != nil {
		if errors.IsS3Error(err, errors.ErrNoSuchKey) {
			return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
		}
		return nil, err
	}

	return info, nil
}
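
// getUploadParts searches the bucket for the objects of an upload and returns
// them as a map keyed by part number (0 is the init part), caching each part
// it finds.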
func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (map[int]*data.ObjectInfo, error) {
	// We search by the UploadID attribute because parts are system objects that
	// carry a system name rather than a filename, and searching attributes by
	// prefix is not supported.
	f := &findParams{
		attr: [2]string{UploadIDAttributeName, p.UploadID},
		bkt:  p.Bkt,
	}

	ids, err := n.objectSearch(ctx, f)
	if err != nil {
		return nil, err
	}

	res := make(map[int]*data.ObjectInfo)

	for i := range ids {
		meta, err := n.objectHead(ctx, p.Bkt, ids[i])
		if err != nil {
			n.log.Warn("couldn't head a part of upload",
				zap.Stringer("object id", &ids[i]),
				zap.Stringer("bucket id", p.Bkt.CID),
				zap.Error(err))
			continue
		}
		info := objInfoFromMeta(p.Bkt, meta)
		// Skip objects completed by "complete-multipart-upload": they also carry
		// the "S3-Upload-Id" attribute but are not system objects.
		if !isSystem(info) {
			continue
		}
		numStr := info.Headers[UploadPartNumberAttributeName]
		num, err := strconv.Atoi(numStr)
		if err != nil {
			return nil, errors.GetAPIError(errors.ErrInternalError)
		}
		res[num] = info
		if err = n.systemCache.PutObject(systemObjectKey(p.Bkt, FormUploadPartName(p.UploadID, p.Key, num)), info); err != nil {
			n.log.Warn("couldn't cache upload part", zap.Error(err))
		}
	}

	if len(res) == 0 {
		return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
	}

	return res, nil
}
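
// FormUploadPartName forms the system object name of an upload part, e.g.
// (with hypothetical values):
//
//	FormUploadPartName("abc123", "file.txt", 3) // ".upload-abc123-file.txt-3"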
func FormUploadPartName(uploadID, key string, partNumber int) string {
	return UploadPartKeyPrefix + uploadID + "-" + key + "-" + strconv.Itoa(partNumber)
}
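
// trimAfterUploadIDAndKey keeps only the uploads that come strictly after the
// given key/upload-ID marker pair.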
func trimAfterUploadIDAndKey(key, id string, uploads []*UploadInfo) []*UploadInfo {
	var res []*UploadInfo
	if len(uploads) != 0 && uploads[len(uploads)-1].Key < key {
		return res
	}

	for _, obj := range uploads {
		if obj.Key >= key && obj.UploadID > id {
			res = append(res, obj)
		}
	}

	return res
}
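
// trimAfterUploadKey keeps only the uploads whose keys come strictly after the
// given key marker.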
func trimAfterUploadKey(key string, objects []*UploadInfo) []*UploadInfo {
	var result []*UploadInfo
	if len(objects) != 0 && objects[len(objects)-1].Key <= key {
		return result
	}
	for i, obj := range objects {
		if obj.Key > key {
			result = objects[i:]
			break
		}
	}

	return result
}
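
// uploadInfoFromMultipartInfo converts a tree-service record into an
// UploadInfo, filtering by prefix and collapsing keys into a common prefix by
// delimiter. E.g. (hypothetical values), with prefix "photos/" and delimiter
// "/", the key "photos/2021/cat.jpg" becomes the directory entry "photos/2021/".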
func uploadInfoFromMultipartInfo(uploadInfo *data.MultipartInfo, prefix, delimiter string) *UploadInfo {
	var isDir bool
	key := uploadInfo.Key

	if !strings.HasPrefix(key, prefix) {
		return nil
	}

	if len(delimiter) > 0 {
		tail := strings.TrimPrefix(key, prefix)
		index := strings.Index(tail, delimiter)
		if index >= 0 {
			isDir = true
			key = prefix + tail[:index+1]
		}
	}

	return &UploadInfo{
		IsDir:    isDir,
		Key:      key,
		UploadID: uploadInfo.UploadID,
		Owner:    uploadInfo.Owner,
		Created:  uploadInfo.Created,
	}
}
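
// appendUploadHeaders adds the upload ID and part number attributes to the
// given metadata map.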
func appendUploadHeaders(metadata map[string]string, uploadID, key string, partNumber int) {
	metadata[UploadIDAttributeName] = uploadID
	metadata[UploadPartNumberAttributeName] = strconv.Itoa(partNumber)
}