[#475] Add extra attributes to partInfo
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
Parent: 5b9a5cadef
Commit: 9b1ccd39be
5 changed files with 65 additions and 62 deletions
@@ -61,6 +61,9 @@ type PartInfo struct {
     UploadID string
     Number   int
     OID      oid.ID
+    Size     int64
+    ETag     string
+    Created  time.Time
 }

 // LockInfo is lock information to create appropriate tree node.

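Note: for reference, this is roughly how the part record reads after the hunk above. The hunk starts mid-struct, so any fields declared before UploadID are not shown, and the package name and import path are assumptions inferred from identifiers used elsewhere in this diff (data.PartInfo, oid.ID):

package data

import (
    "time"

    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

// PartInfo describes one uploaded part. Size, ETag and Created are the
// attributes added by this commit, so listing and completing an upload
// no longer require heading the part object in NeoFS.
type PartInfo struct {
    UploadID string    // multipart upload ID the part belongs to
    Number   int       // part number within the upload
    OID      oid.ID    // ID of the NeoFS object holding the payload
    Size     int64     // payload size in bytes (new)
    ETag     string    // hex-encoded payload hash (new)
    Created  time.Time // when the part was uploaded (new)
}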
@@ -389,7 +389,7 @@ func (n *layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
 func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
     var params getParams

-    params.objInfo = p.ObjectInfo
+    params.oid = p.ObjectInfo.ID
     params.bktInfo = p.BucketInfo

     if p.Range != nil {

@@ -199,6 +199,9 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
         UploadID: p.Info.UploadID,
         Number:   p.PartNumber,
         OID:      *id,
+        Size:     p.Size,
+        ETag:     hex.EncodeToString(hash),
+        Created:  time.Now(),
     }

     oldPartID, err := n.treeService.AddPart(ctx, &bktInfo.CID, multipartInfo.ID, partInfo)

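The ETag stored on partInfo is the hex encoding of the payload hash that uploadPart computes while writing the part; the hashing itself sits outside this hunk. A minimal standalone sketch of that encoding, with SHA-256 assumed purely for illustration:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func main() {
    payload := []byte("part payload")

    // The gateway hashes the payload while streaming it to NeoFS;
    // the exact hash function is not shown in this hunk, SHA-256 is
    // only an assumption for this example.
    hash := sha256.Sum256(payload)

    etag := hex.EncodeToString(hash[:])
    fmt.Println(etag) // 64 hex characters
}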
@@ -220,9 +223,9 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf

         Owner:   bktInfo.Owner,
         Bucket:  bktInfo.Name,
-        Size:    p.Size,
-        Created: time.Now(),
-        HashSum: hex.EncodeToString(hash),
+        Size:    partInfo.Size,
+        Created: partInfo.Created,
+        HashSum: partInfo.ETag,
     }

     if err = n.objCache.PutObject(objInfo); err != nil {

@@ -287,7 +290,7 @@ type multiObjectReader struct {

     curReader io.Reader

-    parts []*data.ObjectInfo
+    parts []*data.PartInfo
 }

 func (x *multiObjectReader) Read(p []byte) (n int, err error) {

@@ -302,7 +305,7 @@ func (x *multiObjectReader) Read(p []byte) (n int, err error) {
         return n, io.EOF
     }

-    x.prm.objInfo = x.parts[0]
+    x.prm.oid = x.parts[0].OID

     x.curReader, err = x.layer.initObjectPayloadReader(x.ctx, x.prm)
     if err != nil {

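multiObjectReader stitches the ordered parts into one stream, opening a payload reader lazily as each part is exhausted; this hunk only changes the element type it walks and the field it hands to initObjectPayloadReader. A rough standalone sketch of the same lazy-concatenation pattern over plain io.Readers (all names here are illustrative, not the gateway's types):

package main

import (
    "fmt"
    "io"
    "strings"
)

// lazyMultiReader opens each part's reader only when the previous one
// is drained, mirroring multiObjectReader.Read above.
type lazyMultiReader struct {
    open  func(part int) (io.Reader, error) // stand-in for initObjectPayloadReader
    parts []int                             // stand-in for []*data.PartInfo
    cur   io.Reader
}

func (r *lazyMultiReader) Read(p []byte) (int, error) {
    for {
        if r.cur == nil {
            if len(r.parts) == 0 {
                return 0, io.EOF
            }
            var err error
            if r.cur, err = r.open(r.parts[0]); err != nil {
                return 0, err
            }
            r.parts = r.parts[1:]
        }
        n, err := r.cur.Read(p)
        if err == io.EOF {
            r.cur = nil // current part drained, move on to the next one
            if n > 0 {
                return n, nil
            }
            continue
        }
        return n, err
    }
}

func main() {
    r := &lazyMultiReader{
        parts: []int{1, 2, 3},
        open: func(part int) (io.Reader, error) {
            return strings.NewReader(fmt.Sprintf("part-%d ", part)), nil
        },
    }
    out, _ := io.ReadAll(r)
    fmt.Println(string(out)) // part-1 part-2 part-3
}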
@@ -323,27 +326,27 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
         }
     }

-    multipartInfo, objects, err := n.getUploadParts(ctx, p.Info) // todo consider avoid heading objects
+    multipartInfo, partsInfo, err := n.getUploadParts(ctx, p.Info)
     if err != nil {
         return nil, nil, err
     }

-    if len(objects) < len(p.Parts) {
+    if len(partsInfo) < len(p.Parts) {
         return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
     }

-    parts := make([]*data.ObjectInfo, 0, len(p.Parts))
+    parts := make([]*data.PartInfo, 0, len(p.Parts))

     for i, part := range p.Parts {
-        info := objects[part.PartNumber]
-        if info == nil || part.ETag != info.HashSum {
+        partInfo := partsInfo[part.PartNumber]
+        if part.ETag != partInfo.ETag {
             return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
         }
         // for the last part we have no minimum size limit
-        if i != len(p.Parts)-1 && info.Size < uploadMinSize {
+        if i != len(p.Parts)-1 && partInfo.Size < uploadMinSize {
             return nil, nil, errors.GetAPIError(errors.ErrEntityTooSmall)
         }
-        parts = append(parts, info)
+        parts = append(parts, partInfo)
     }

     initMetadata := make(map[string]string, len(multipartInfo.Meta))

@@ -386,17 +389,14 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
     var addr oid.Address
     addr.SetContainer(p.Info.Bkt.CID)
-    for partNum, objInfo := range objects {
-        if partNum == 0 {
-            continue
-        }
-        if err = n.objectDelete(ctx, p.Info.Bkt, objInfo.ID); err != nil {
+    for _, partInfo := range partsInfo {
+        if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
             n.log.Warn("could not delete upload part",
-                zap.Stringer("object id", objInfo.ID),
-                zap.Stringer("bucket id", p.Info.Bkt.CID),
+                zap.Stringer("object id", &partInfo.OID),
+                zap.Stringer("bucket id", &p.Info.Bkt.CID),
                 zap.Error(err))
         }
-        addr.SetObject(objInfo.ID)
+        addr.SetObject(partInfo.OID)
         n.objCache.Delete(addr)
     }

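The cleanup loop above reuses a single oid.Address, resetting only the object part on every iteration before evicting it from the cache. A tiny helper showing the same address composition with neofs-sdk-go types (the import paths are assumed; the SetContainer/SetObject calls are the ones used in the hunk):

package layer

import (
    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

// newAddress composes a NeoFS address from the bucket's container ID
// and a part's object ID, as the loop above does for cache invalidation.
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
    var addr oid.Address
    addr.SetContainer(cnr)
    addr.SetObject(obj)
    return addr
}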
@@ -464,14 +464,15 @@ func (n *layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUpload
 }

 func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) error {
-    multipartInfo, objects, err := n.getUploadParts(ctx, p)
+    multipartInfo, parts, err := n.getUploadParts(ctx, p)
     if err != nil {
         return err
     }

-    for _, info := range objects {
-        if err = n.objectDelete(ctx, p.Bkt, info.ID); err != nil {
-            return err
+    for _, info := range parts {
+        if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
+            n.log.Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
+                zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number))
         }
     }

@@ -480,24 +481,21 @@ func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
 func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error) {
     var res ListPartsInfo
-    multipartInfo, objs, err := n.getUploadParts(ctx, p.Info) // todo consider listing without head object from NeoFS
+    multipartInfo, partsInfo, err := n.getUploadParts(ctx, p.Info)
     if err != nil {
         return nil, err
     }

     res.Owner = multipartInfo.Owner

-    parts := make([]*Part, 0, len(objs))
+    parts := make([]*Part, 0, len(partsInfo))

-    for num, objInfo := range objs {
-        if num == 0 {
-            continue
-        }
+    for _, partInfo := range partsInfo {
         parts = append(parts, &Part{
-            ETag:         objInfo.HashSum,
-            LastModified: objInfo.Created.UTC().Format(time.RFC3339),
-            PartNumber:   num,
-            Size:         objInfo.Size,
+            ETag:         partInfo.ETag,
+            LastModified: partInfo.Created.UTC().Format(time.RFC3339),
+            PartNumber:   partInfo.Number,
+            Size:         partInfo.Size,
         })
     }

@@ -525,7 +523,7 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
     return &res, nil
 }

-func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.ObjectInfo, error) {
+func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
     multipartInfo, err := n.treeService.GetMultipartUpload(ctx, &p.Bkt.CID, p.Key, p.UploadID)
     if err != nil {
         if stderrors.Is(err, ErrNodeNotFound) {

@@ -539,28 +537,9 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
         return nil, nil, err
     }

-    res := make(map[int]*data.ObjectInfo)
-    var addr oid.Address
-    addr.SetContainer(p.Bkt.CID)
+    res := make(map[int]*data.PartInfo, len(parts))
     for _, part := range parts {
-        addr.SetObject(part.OID)
-        objInfo := n.objCache.GetObject(addr)
-        if objInfo == nil {
-            meta, err := n.objectHead(ctx, p.Bkt, part.OID)
-            if err != nil {
-                n.log.Warn("couldn't head a part of upload",
-                    zap.String("object id", part.OID.EncodeToString()),
-                    zap.String("bucket id", p.Bkt.CID.EncodeToString()),
-                    zap.Error(err))
-                continue
-            }
-            objInfo = objInfoFromMeta(p.Bkt, meta)
-        }
-
-        res[part.Number] = objInfo
-        if err = n.objCache.PutObject(objInfo); err != nil {
-            n.log.Warn("couldn't cache upload part", zap.Error(err))
-        }
+        res[part.Number] = part
     }

     return multipartInfo, res, nil

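With the heading and caching gone, getUploadParts now returns part records straight from the tree service, keyed by part number, which is what CompleteMultipartUpload and ListParts index into above. A small self-contained sketch of that map shape and lookup (types and values here are stand-ins, not the gateway's):

package main

import "fmt"

// partInfo is a stand-in for data.PartInfo with only the fields used below.
type partInfo struct {
    Number int
    Size   int64
    ETag   string
}

func main() {
    // Shaped like getUploadParts' new map[int]*data.PartInfo result:
    // one record per part, keyed by part number, no object HEADs needed.
    parts := map[int]*partInfo{
        1: {Number: 1, Size: 5 << 20, ETag: "ab12"},
        2: {Number: 2, Size: 3 << 20, ETag: "cd34"},
    }

    // CompleteMultipartUpload-style lookup by the client-supplied number.
    if p, ok := parts[2]; ok && p.ETag == "cd34" {
        fmt.Println("part 2 ok, size:", p.Size)
    }
}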
@@ -30,7 +30,7 @@ type (
         // payload range
         off, ln uint64

-        objInfo *data.ObjectInfo
+        oid     oid.ID
         bktInfo *data.BucketInfo
     }

@@ -93,8 +93,8 @@ func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj
 // Zero range corresponds to full payload (panics if only offset is set).
 func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
     prm := PrmObjectRead{
-        Container:    p.objInfo.CID,
-        Object:       p.objInfo.ID,
+        Container:    p.bktInfo.CID,
+        Object:       p.oid,
         WithPayload:  true,
         PayloadRange: [2]uint64{p.off, p.ln},
     }

@@ -55,6 +55,8 @@ const (
     isTagKV      = "isTag"
     uploadIDKV   = "UploadId"
     partNumberKV = "Number"
+    sizeKV       = "Size"
+    etagKV       = "ETag"

     // keys for lock.
     isLockKV = "IsLock"

@@ -215,16 +217,32 @@ func newMultipartInfo(node NodeResponse) (*data.MultipartInfo, error) {
 }

 func newPartInfo(node NodeResponse) (*data.PartInfo, error) {
+    var err error
     partInfo := &data.PartInfo{}

     for _, kv := range node.GetMeta() {
+        value := string(kv.GetValue())
         switch kv.GetKey() {
         case partNumberKV:
-            partInfo.Number, _ = strconv.Atoi(string(kv.GetValue()))
+            if partInfo.Number, err = strconv.Atoi(value); err != nil {
+                return nil, fmt.Errorf("invalid part number: %w", err)
+            }
         case oidKV:
-            if err := partInfo.OID.DecodeString(string(kv.GetValue())); err != nil {
+            if err = partInfo.OID.DecodeString(value); err != nil {
                 return nil, fmt.Errorf("invalid oid: %w", err)
             }
+        case etagKV:
+            partInfo.ETag = value
+        case sizeKV:
+            if partInfo.Size, err = strconv.ParseInt(value, 10, 64); err != nil {
+                return nil, fmt.Errorf("invalid part size: %w", err)
+            }
+        case createdKV:
+            var utcMilli int64
+            if utcMilli, err = strconv.ParseInt(value, 10, 64); err != nil {
+                return nil, fmt.Errorf("invalid created timestamp: %w", err)
+            }
+            partInfo.Created = time.UnixMilli(utcMilli)
         }
     }

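The Created attribute round-trips through the tree as a decimal Unix-millisecond string: AddPart (next hunk) formats it with UnixMilli, and newPartInfo above parses it back with time.UnixMilli. A short sketch of that encoding (values made up):

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    created := time.Now()

    // AddPart-side encoding: UTC milliseconds as a decimal string.
    encoded := strconv.FormatInt(created.UTC().UnixMilli(), 10)

    // newPartInfo-side decoding.
    utcMilli, err := strconv.ParseInt(encoded, 10, 64)
    if err != nil {
        panic(fmt.Errorf("invalid created timestamp: %w", err))
    }
    decoded := time.UnixMilli(utcMilli)

    // Millisecond precision survives the round trip.
    fmt.Println(created.UnixMilli() == decoded.UnixMilli()) // true
}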
@@ -802,6 +820,9 @@ func (c *TreeClient) AddPart(ctx context.Context, cnrID *cid.ID, multipartNodeID
     meta := map[string]string{
         partNumberKV: strconv.Itoa(info.Number),
         oidKV:        info.OID.EncodeToString(),
+        sizeKV:       strconv.FormatInt(info.Size, 10),
+        createdKV:    strconv.FormatInt(info.Created.UTC().UnixMilli(), 10),
+        etagKV:       info.ETag,
     }

     var foundPartID uint64
