forked from TrueCloudLab/frostfs-s3-gw

[#476] Fix parts info for GetObjectAttributes

Signed-off-by: Denis Kirillov <denis@nspcc.ru>

parent 9b1ccd39be
commit c8e8ba9f6a

6 changed files with 217 additions and 101 deletions
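
For orientation (not part of the diff): the handler is driven by the X-Amz-Object-Attributes request header that this commit exposes as api.AmzObjectAttributes. A minimal client-side sketch, assuming the gateway follows the standard S3 GetObjectAttributes shape with the ?attributes subresource; endpoint, bucket and key are placeholders and request signing is omitted:

package main

import (
	"log"
	"net/http"
)

func main() {
	// Sketch only: endpoint, bucket and key are placeholders; signing is omitted.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/bucket/object?attributes", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Request only the part information that this commit fixes.
	req.Header.Set("X-Amz-Object-Attributes", "ObjectParts")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// The body is GetObjectAttributesResponse XML; after this fix,
	// ObjectParts.PartsCount reflects the number of completed parts.
}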
@@ -26,7 +26,9 @@ type (
 		NextPartNumberMarker int    `xml:"NextPartNumberMarker,omitempty"`
 		PartNumberMarker     int    `xml:"PartNumberMarker,omitempty"`
 		Parts                []Part `xml:"Part,omitempty"`
-		PartsCount           int    `xml:"PartsCount,omitempty"`
+
+		// Only this field is used.
+		PartsCount int `xml:"PartsCount,omitempty"`
 	}

 	Part struct {
@@ -35,17 +37,13 @@ type (
 	}

 	GetObjectAttributesArgs struct {
-		MaxParts         int
-		PartNumberMarker int
-		Attributes       []string
-		VersionID        string
-		Conditional      *conditionalArgs
+		Attributes  []string
+		VersionID   string
+		Conditional *conditionalArgs
 	}
 )

 const (
-	partNumberMarkerDefault = -1
-
 	eTag        = "ETag"
 	checksum    = "Checksum"
 	objectParts = "ObjectParts"
@@ -123,16 +121,11 @@ func writeAttributesHeaders(h http.Header, info *data.ObjectInfo, params *GetObj
 }

 func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, error) {
-	var (
-		err error
-
-		res = &GetObjectAttributesArgs{}
-
-		attributesVal = r.Header.Get("X-Amz-Object-Attributes")
-		maxPartsVal   = r.Header.Get("X-Amz-Max-Parts")
-		markerVal     = r.Header.Get("X-Amz-Part-Number-Marker")
-		queryValues   = r.URL.Query()
-	)
+	res := &GetObjectAttributesArgs{
+		VersionID: r.URL.Query().Get(api.QueryVersionID),
+	}
+
+	attributesVal := r.Header.Get(api.AmzObjectAttributes)
 	if attributesVal == "" {
 		return nil, errors.GetAPIError(errors.ErrInvalidAttributeName)
 	}
@@ -145,22 +138,7 @@ func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, err
 		res.Attributes = append(res.Attributes, a)
 	}

-	if maxPartsVal == "" {
-		res.MaxParts = layer.MaxSizePartsList
-	} else if res.MaxParts, err = strconv.Atoi(maxPartsVal); err != nil || res.MaxParts < 0 {
-		return nil, errors.GetAPIError(errors.ErrInvalidMaxKeys)
-	}
-
-	if markerVal == "" {
-		res.PartNumberMarker = partNumberMarkerDefault
-	} else if res.PartNumberMarker, err = strconv.Atoi(markerVal); err != nil || res.PartNumberMarker < 0 {
-		return nil, errors.GetAPIError(errors.ErrInvalidPartNumberMarker)
-	}
-
-	res.VersionID = queryValues.Get(api.QueryVersionID)
-
-	res.Conditional, err = parseConditionalHeaders(r.Header)
-	return res, err
+	return res, nil
 }

 func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs) (*GetObjectAttributesResponse, error) {
@@ -175,7 +153,7 @@ func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttribu
 		case objectSize:
 			resp.ObjectSize = info.Size
 		case objectParts:
-			parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
+			parts, err := formUploadAttributes(info)
 			if err != nil {
 				return nil, fmt.Errorf("form upload attributes: %w", err)
 			}
@@ -188,54 +166,19 @@ func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttribu
 	return resp, nil
 }

-func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectParts, error) {
+func formUploadAttributes(info *data.ObjectInfo) (*ObjectParts, error) {
+	var err error
 	res := ObjectParts{}

-	if _, ok := info.Headers[layer.UploadIDAttributeName]; !ok {
+	partsCountStr, ok := info.Headers[layer.UploadCompletedPartsCount]
+	if !ok {
 		return nil, nil
 	}

-	parts := make([]Part, 0)
-	val, ok := info.Headers[layer.UploadCompletedParts]
-	if ok {
-		pairs := strings.Split(val, ",")
-		for _, p := range pairs {
-			// nums[0] -- part number, nums[1] -- part size
-			nums := strings.Split(p, "=")
-			if len(nums) != 2 {
-				return nil, nil
-			}
-			num, err := strconv.Atoi(nums[0])
-			if err != nil {
-				return nil, err
-			}
-			size, err := strconv.Atoi(nums[1])
-			if err != nil {
-				return nil, fmt.Errorf("parse part size: %w", err)
-			}
-			parts = append(parts, Part{PartNumber: num, Size: size})
-		}
+	res.PartsCount, err = strconv.Atoi(partsCountStr)
+	if err != nil {
+		return nil, fmt.Errorf("invalid parts count header '%s': %w", partsCountStr, err)
 	}

-	res.PartsCount = len(parts)
-
-	if marker != partNumberMarkerDefault {
-		res.PartNumberMarker = marker
-		for i, n := range parts {
-			if n.PartNumber == marker {
-				parts = parts[i:]
-				break
-			}
-		}
-	}
-	res.MaxParts = maxParts
-	if len(parts) > maxParts {
-		res.IsTruncated = true
-		res.NextPartNumberMarker = parts[maxParts].PartNumber
-		parts = parts[:maxParts]
-	}
-
-	res.Parts = parts
-
 	return &res, nil
 }
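
Net effect of the hunks above: parseGetObjectAttributeArgs no longer reads X-Amz-Max-Parts or X-Amz-Part-Number-Marker, and formUploadAttributes stops rebuilding the part list from the S3-Completed-Parts pair string; it only reports the count stored in the new S3-Completed-Parts-Count object header. A minimal sketch of the new reading path, assuming it runs inside a package handler test (the value "3" is made up):

	// Sketch only, not part of the diff.
	info := &data.ObjectInfo{
		Headers: map[string]string{layer.UploadCompletedPartsCount: "3"},
	}

	parts, err := formUploadAttributes(info)
	require.NoError(t, err)
	require.Equal(t, 3, parts.PartsCount)
	// Objects without the header (not completed multipart uploads) yield parts == nil,
	// so the ObjectParts element is omitted from the XML response.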
api/handler/atttributes_test.go (new file, 71 lines)
@@ -0,0 +1,71 @@
+package handler
+
+import (
+	"bytes"
+	"context"
+	"net/http"
+	"net/url"
+	"testing"
+
+	"github.com/nspcc-dev/neofs-s3-gw/api"
+	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGetObjectPartsAttributes(t *testing.T) {
+	ctx := context.Background()
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-get-attributes"
+	objName, objMultipartName := "object", "object-multipart"
+
+	createTestBucket(ctx, t, hc, bktName)
+
+	body := bytes.NewReader([]byte("content"))
+	w, r := prepareTestPayloadRequest(bktName, objName, body)
+	hc.Handler().PutObjectHandler(w, r)
+	assertStatus(t, w, http.StatusOK)
+
+	w, r = prepareTestRequest(t, bktName, objName, nil)
+	r.Header.Set(api.AmzObjectAttributes, objectParts)
+	hc.Handler().GetObjectAttributesHandler(w, r)
+	result := &GetObjectAttributesResponse{}
+	parseTestResponse(t, w, result)
+	require.Nil(t, result.ObjectParts)
+
+	w, r = prepareTestRequest(t, bktName, objMultipartName, nil)
+	hc.Handler().CreateMultipartUploadHandler(w, r)
+	multipartUpload := &InitiateMultipartUploadResponse{}
+	parseTestResponse(t, w, multipartUpload)
+
+	body2 := bytes.NewReader([]byte("content2"))
+	w, r = prepareTestPayloadRequest(bktName, objMultipartName, body2)
+	query := make(url.Values)
+	query.Add(uploadIDHeaderName, multipartUpload.UploadID)
+	query.Add(partNumberHeaderName, "1")
+	r.URL.RawQuery = query.Encode()
+	hc.Handler().UploadPartHandler(w, r)
+	assertStatus(t, w, http.StatusOK)
+	etag := w.Result().Header.Get(api.ETag)
+
+	completeUpload := &CompleteMultipartUpload{
+		Parts: []*layer.CompletedPart{{
+			ETag:       etag,
+			PartNumber: 1,
+		}},
+	}
+	w, r = prepareTestRequest(t, bktName, objMultipartName, completeUpload)
+	query = make(url.Values)
+	query.Add(uploadIDHeaderName, multipartUpload.UploadID)
+	r.URL.RawQuery = query.Encode()
+	hc.Handler().CompleteMultipartUploadHandler(w, r)
+	assertStatus(t, w, http.StatusOK)
+
+	w, r = prepareTestRequest(t, bktName, objMultipartName, nil)
+	r.Header.Set(api.AmzObjectAttributes, objectParts)
+	hc.Handler().GetObjectAttributesHandler(w, r)
+	result = &GetObjectAttributesResponse{}
+	parseTestResponse(t, w, result)
+	require.NotNil(t, result.ObjectParts)
+	require.Equal(t, 1, result.ObjectParts.PartsCount)
+}
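
The new test covers both the plain-object and the multipart path end to end; it should be runnable in isolation with go test -run TestGetObjectPartsAttributes ./api/handler/...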
@@ -144,10 +144,18 @@ func prepareTestRequest(t *testing.T, bktName, objName string, body interface{})
 	return w, r
 }

-func assertStatus(t *testing.T, w *httptest.ResponseRecorder, status int) {
-	if w.Code != status {
-		resp, err := io.ReadAll(w.Result().Body)
-		require.NoError(t, err)
-		require.Failf(t, string(resp), "assert status fail, expected: %d, actual: %d", status, w.Code)
-	}
+func prepareTestPayloadRequest(bktName, objName string, payload io.Reader) (*httptest.ResponseRecorder, *http.Request) {
+	w := httptest.NewRecorder()
+	r := httptest.NewRequest(http.MethodPut, defaultURL, payload)
+
+	reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
+	r = r.WithContext(api.SetReqInfo(r.Context(), reqInfo))
+
+	return w, r
+}
+
+func parseTestResponse(t *testing.T, response *httptest.ResponseRecorder, body interface{}) {
+	assertStatus(t, response, http.StatusOK)
+	err := xml.NewDecoder(response.Result().Body).Decode(body)
+	require.NoError(t, err)
 }
@@ -52,6 +52,7 @@ const (
 	AmzObjectLockMode            = "X-Amz-Object-Lock-Mode"
 	AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date"
 	AmzBypassGovernanceRetention = "X-Amz-Bypass-Governance-Retention"
+	AmzObjectAttributes          = "X-Amz-Object-Attributes"

 	ContainerID = "X-Container-Id"

@@ -22,7 +22,7 @@ import (
 const (
 	UploadIDAttributeName         = "S3-Upload-Id"
 	UploadPartNumberAttributeName = "S3-Upload-Part-Number"
-	UploadCompletedParts          = "S3-Completed-Parts"
+	UploadCompletedPartsCount     = "S3-Completed-Parts-Count"

 	metaPrefix = "meta-"
 	aclPrefix  = "acl-"
@@ -349,7 +349,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 		parts = append(parts, partInfo)
 	}

-	initMetadata := make(map[string]string, len(multipartInfo.Meta))
+	initMetadata := make(map[string]string, len(multipartInfo.Meta)+1)
+	initMetadata[UploadCompletedPartsCount] = strconv.Itoa(len(p.Parts))
 	uploadData := &UploadData{
 		TagSet:     make(map[string]string),
 		ACLHeaders: make(map[string]string),
@@ -2,6 +2,7 @@ package layer

 import (
 	"context"
+	"fmt"
 	"sort"
 	"strings"

@@ -11,10 +12,12 @@ import (
 )

 type TreeServiceMock struct {
 	settings map[string]*data.BucketSettings
 	versions map[string]map[string][]*data.NodeVersion
 	system   map[string]map[string]*data.BaseNodeVersion
 	locks    map[string]map[uint64]*data.LockInfo
+	multiparts map[string]map[string][]*data.MultipartInfo
+	parts      map[string]map[int]*data.PartInfo
 }

 func (t *TreeServiceMock) GetObjectTagging(ctx context.Context, cnrID *cid.ID, objVersion *data.NodeVersion) (map[string]string, error) {
@@ -49,10 +52,12 @@ func (t *TreeServiceMock) DeleteBucketTagging(ctx context.Context, cnrID *cid.ID

 func NewTreeService() *TreeServiceMock {
 	return &TreeServiceMock{
 		settings: make(map[string]*data.BucketSettings),
 		versions: make(map[string]map[string][]*data.NodeVersion),
 		system:   make(map[string]map[string]*data.BaseNodeVersion),
 		locks:    make(map[string]map[uint64]*data.LockInfo),
+		multiparts: make(map[string]map[string][]*data.MultipartInfo),
+		parts:      make(map[string]map[int]*data.PartInfo),
 	}
 }

@@ -245,28 +250,115 @@ func (t *TreeServiceMock) GetAllVersionsByPrefix(_ context.Context, cnrID *cid.I
 	return result, nil
 }

-func (t *TreeServiceMock) CreateMultipartUpload(ctx context.Context, cnrID *cid.ID, info *data.MultipartInfo) error {
-	panic("implement me")
+func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, cnrID *cid.ID, info *data.MultipartInfo) error {
+	cnrMultipartsMap, ok := t.multiparts[cnrID.EncodeToString()]
+	if !ok {
+		t.multiparts[cnrID.EncodeToString()] = map[string][]*data.MultipartInfo{
+			info.Key: {info},
+		}
+		return nil
+	}
+
+	multiparts := cnrMultipartsMap[info.Key]
+	if len(multiparts) != 0 {
+		info.ID = multiparts[len(multiparts)-1].ID + 1
+	}
+	cnrMultipartsMap[info.Key] = append(multiparts, info)
+
+	return nil
 }

 func (t *TreeServiceMock) GetMultipartUploadsByPrefix(ctx context.Context, cnrID *cid.ID, prefix string) ([]*data.MultipartInfo, error) {
 	panic("implement me")
 }

-func (t *TreeServiceMock) GetMultipartUpload(ctx context.Context, cnrID *cid.ID, objectName, uploadID string) (*data.MultipartInfo, error) {
-	panic("implement me")
+func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, cnrID *cid.ID, objectName, uploadID string) (*data.MultipartInfo, error) {
+	cnrMultipartsMap, ok := t.multiparts[cnrID.EncodeToString()]
+	if !ok {
+		return nil, ErrNodeNotFound
+	}
+
+	multiparts := cnrMultipartsMap[objectName]
+	for _, multipart := range multiparts {
+		if multipart.UploadID == uploadID {
+			return multipart, nil
+		}
+	}
+
+	return nil, ErrNodeNotFound
 }

 func (t *TreeServiceMock) AddPart(ctx context.Context, cnrID *cid.ID, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete *oid.ID, err error) {
-	panic("implement me")
+	multipartInfo, err := t.GetMultipartUpload(ctx, cnrID, info.Key, info.UploadID)
+	if err != nil {
+		return nil, err
+	}
+
+	if multipartInfo.ID != multipartNodeID {
+		return nil, fmt.Errorf("invalid multipart info id")
+	}
+
+	partsMap, ok := t.parts[info.UploadID]
+	if !ok {
+		partsMap = make(map[int]*data.PartInfo)
+	}
+
+	partsMap[info.Number] = info
+
+	t.parts[info.UploadID] = partsMap
+	return nil, nil
 }

-func (t *TreeServiceMock) GetParts(ctx context.Context, cnrID *cid.ID, multipartNodeID uint64) ([]*data.PartInfo, error) {
-	panic("implement me")
+func (t *TreeServiceMock) GetParts(_ context.Context, cnrID *cid.ID, multipartNodeID uint64) ([]*data.PartInfo, error) {
+	cnrMultipartsMap := t.multiparts[cnrID.EncodeToString()]
+
+	var foundMultipart *data.MultipartInfo
+
+LOOP:
+	for _, multiparts := range cnrMultipartsMap {
+		for _, multipart := range multiparts {
+			if multipart.ID == multipartNodeID {
+				foundMultipart = multipart
+				break LOOP
+			}
+		}
+	}
+
+	if foundMultipart == nil {
+		return nil, ErrNodeNotFound
+	}
+
+	partsMap := t.parts[foundMultipart.UploadID]
+	result := make([]*data.PartInfo, 0, len(partsMap))
+	for _, part := range partsMap {
+		result = append(result, part)
+	}
+
+	return result, nil
 }

-func (t *TreeServiceMock) DeleteMultipartUpload(ctx context.Context, cnrID *cid.ID, multipartNodeID uint64) error {
-	panic("implement me")
+func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, cnrID *cid.ID, multipartNodeID uint64) error {
+	cnrMultipartsMap := t.multiparts[cnrID.EncodeToString()]
+
+	var uploadID string
+
+LOOP:
+	for key, multiparts := range cnrMultipartsMap {
+		for i, multipart := range multiparts {
+			if multipart.ID == multipartNodeID {
+				uploadID = multipart.UploadID
+				cnrMultipartsMap[key] = append(multiparts[:i], multiparts[i+1:]...)
+				break LOOP
+			}
+		}
+	}
+
+	if uploadID == "" {
+		return ErrNodeNotFound
+	}
+
+	delete(t.parts, uploadID)
+	return nil
 }

 func (t *TreeServiceMock) PutLock(ctx context.Context, cnrID *cid.ID, nodeID uint64, lock *data.LockInfo) error {
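
For reference, a minimal sketch of how the newly implemented mock methods are expected to behave, assuming it runs inside a layer package test where t is the *testing.T and cnrID is a valid cid.ID (both assumed, not from the diff):

	// Sketch only, not part of the diff.
	ts := NewTreeService()
	info := &data.MultipartInfo{Key: "obj", UploadID: "upload-1"}

	require.NoError(t, ts.CreateMultipartUpload(context.Background(), &cnrID, info))

	got, err := ts.GetMultipartUpload(context.Background(), &cnrID, "obj", "upload-1")
	require.NoError(t, err)
	require.Equal(t, info, got)
	// Unknown containers or upload IDs now return ErrNodeNotFound instead of panicking.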