[#Issue] tmp

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
This commit is contained in:
Denis Kirillov 2024-04-17 11:14:46 +03:00
parent 30ae1c469e
commit 7a1665c660
16 changed files with 647 additions and 1299 deletions

View file

@ -61,8 +61,6 @@ func (e ExtendedObjectInfo) Version() string {
// BaseNodeVersion is minimal node info from tree service.
// Basically used for "system" object.
type BaseNodeVersion struct {
ID uint64
ParenID uint64
OID oid.ID
Timestamp uint64
Size uint64
@ -100,9 +98,6 @@ type ObjectTaggingInfo struct {
// MultipartInfo is multipart upload information.
type MultipartInfo struct {
// ID is node id in tree service.
// It's ignored when creating a new multipart upload.
ID uint64
Key string
UploadID string
Owner user.ID
@ -139,8 +134,6 @@ func (p *PartInfo) GetETag(md5Enabled bool) string {
// LockInfo is lock information to create appropriate tree node.
type LockInfo struct {
id uint64
legalHoldOID oid.ID
setLegalHold bool
@ -150,12 +143,8 @@ type LockInfo struct {
isCompliance bool
}
func NewLockInfo(id uint64) *LockInfo {
return &LockInfo{id: id}
}
func (l LockInfo) ID() uint64 {
return l.id
func NewLockInfo() *LockInfo {
return &LockInfo{}
}
func (l *LockInfo) SetLegalHold(objID oid.ID) {

View file

@ -264,6 +264,19 @@ func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encr
return w
}
// abortMultipartUpload aborts the given multipart upload via the handler
// and asserts that the response status is HTTP 204 No Content.
func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
assertStatus(hc.t, w, http.StatusNoContent)
}
// abortMultipartUploadBase issues an AbortMultipartUpload request carrying the
// uploadID as a query parameter and returns the raw response recorder so
// callers can make their own assertions on status and headers.
func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().AbortMultipartUploadHandler(w, r)
return w
}
func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
w := completeMultipartUploadBase(hc, bktName, objName, uploadID, partsETags)
assertStatus(hc.t, w, http.StatusOK)

View file

@ -197,6 +197,19 @@ func TestGetObject(t *testing.T) {
getObjectAssertS3Error(hc, bktName, objName, emptyVersion, errors.ErrNoSuchKey)
}
// TestGetObjectPiloramaPrefix puts two objects whose names share a common
// prefix ("obj" and "obj2") and verifies that HEAD on the shorter name
// returns exactly that object (matching ETag) rather than a prefix match.
func TestGetObjectPiloramaPrefix(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName, objName2 := "bucket", "obj", "obj2"
createBucket(hc, bktName)
hdr := putObjectContent(hc, bktName, objName, "content1")
putObjectContent(hc, bktName, objName2, "content2")
w := headObjectBase(hc, bktName, objName, emptyVersion)
assertStatus(t, w, http.StatusOK)
require.Equal(t, hdr.Get(api.ETag), w.Header().Get(api.ETag))
}
func TestGetObjectEnabledMD5(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket", "obj"

View file

@ -98,7 +98,7 @@ func TestMultipartReUploadPart(t *testing.T) {
innerUploadInfo, err := hc.tree.GetMultipartUpload(hc.context, bktInfo, objName, uploadInfo.UploadID)
require.NoError(t, err)
treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo.ID)
treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo)
require.NoError(t, err)
require.Len(t, treeParts, len(list.Parts))
@ -264,6 +264,25 @@ func TestMultipartUploadSize(t *testing.T) {
})
}
// TestMultipartUploadNewPilorama verifies that aborting a multipart upload
// removes both the stored part payloads and the corresponding part nodes in
// the tree service.
func TestMultipartUploadNewPilorama(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-test-list-parts", "object-multipart"
bktInfo := createBucket(hc, bktName)
partSize := 1024
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSize)
abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
// After abort: no objects left in the test pool...
require.Len(t, hc.tp.Objects(), 0)
parts, err := hc.tree.GetParts(hc.Context(), bktInfo.BktInfo, &data.MultipartInfo{Key: objName, UploadID: uploadInfo.UploadID})
require.NoError(t, err)
// ...and no part nodes left in the tree.
require.Len(t, parts, 0)
}
func TestListParts(t *testing.T) {
hc := prepareHandlerContext(t)

View file

@ -94,13 +94,10 @@ func TestListObjectsWithOldTreeNodes(t *testing.T) {
}
func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", 0, 0)
nodes, err := hc.treeMock.BurnedList(hc.Context(), bktInfo, "version", "")
require.NoError(hc.t, err)
for _, node := range nodes {
if node.GetNodeID() == 0 {
continue
}
meta := make(map[string]string, len(node.GetMeta()))
for _, m := range node.GetMeta() {
if m.GetKey() != "Created" && m.GetKey() != "Owner" {
@ -108,7 +105,7 @@ func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
}
}
err = hc.treeMock.MoveNode(hc.Context(), bktInfo, "version", node.GetNodeID(), node.GetParentID(), meta)
err = hc.treeMock.BurnedAdd(hc.Context(), bktInfo, "version", node.GetKey(), meta)
require.NoError(hc.t, err)
}
}

View file

@ -659,10 +659,10 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
}
if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
return n.handleObjectDeleteErrors(ctx, bkt, obj, nodeVersion.ID)
return n.handleObjectDeleteErrors(ctx, bkt, obj, nodeVersion)
}
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID)
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion)
n.cache.CleanListCacheEntriesContainingObject(obj.Name, bkt.CID)
return obj
}
@ -689,7 +689,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
if nullVersionToDelete != nil {
if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nullVersionToDelete, obj); obj.Error != nil {
return n.handleObjectDeleteErrors(ctx, bkt, obj, nullVersionToDelete.ID)
return n.handleObjectDeleteErrors(ctx, bkt, obj, nullVersionToDelete)
}
}
}
@ -718,7 +718,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
IsUnversioned: settings.VersioningSuspended(),
}
if _, obj.Error = n.treeService.AddVersion(ctx, bkt, newVersion); obj.Error != nil {
if obj.Error = n.treeService.AddVersion(ctx, bkt, newVersion); obj.Error != nil {
return obj
}
@ -737,7 +737,7 @@ func (n *layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
return obj
}
func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeVersion *data.NodeVersion) *VersionedObject {
if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
return obj
}
@ -745,7 +745,7 @@ func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketIn
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion)
if obj.Error == nil {
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
}

View file

@ -142,7 +142,7 @@ func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*Lis
ListType: ListObjectsV2Type,
}
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
objects, next, err := n.getLatestObjectsVersionsNew(ctx, prm)
if err != nil {
return nil, err
}
@ -225,6 +225,57 @@ func (n *layer) getLatestObjectsVersions(ctx context.Context, p commonLatestVers
return
}
// getLatestObjectsVersionsNew lists the latest object versions under p.Prefix,
// streaming nodes from the tree service through a small worker pool.
//
// A list session (keyed by bucket CID, prefix and bookmark) is reused across
// paginated requests: on a cache hit the stream is re-initialized starting at
// the first cached "next" element. Results are collected, sorted by FilePath,
// and when more than p.MaxKeys objects were gathered, the (MaxKeys+1)-th
// element is returned as `next` and the session is persisted for the
// following page.
//
// NOTE(review): on the cache-hit branch session.Next[0] is indexed without a
// length check — presumably a cached session always carries a non-empty Next;
// confirm, otherwise this can panic.
func (n *layer) getLatestObjectsVersionsNew(ctx context.Context, p commonLatestVersionsListingParams) (objects []*data.ExtendedNodeVersion, next *data.ExtendedNodeVersion, err error) {
if p.MaxKeys == 0 {
return nil, nil, nil
}
owner := n.BearerOwner(ctx)
cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, p.Bookmark)
session := n.cache.GetListSession(owner, cacheKey)
if session == nil {
// Fresh listing: start the stream at the beginning of the prefix.
session = &data.ListSession{NamesMap: make(map[string]struct{})}
session.Stream, err = n.treeService.InitVersionsByPrefixStream(ctx, p.BktInfo, p.Prefix, "", true)
if err != nil {
return nil, nil, err
}
} else {
// Continued listing: resume from the first element cached as "next".
session.Stream, err = n.treeService.InitVersionsByPrefixStream(ctx, p.BktInfo, p.Prefix, session.Next[0].Name(), true)
if err != nil {
return nil, nil, err
}
}
session.Context, session.Cancel = context.WithCancel(ctx)
generator, errorCh := nodesGeneratorStream(ctx, p.commonVersionsListingParams, session)
objOutCh, err := n.initWorkerPool(ctx, 2, p.commonVersionsListingParams, generator)
if err != nil {
return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
}
// MaxKeys+1 capacity: one extra element signals a further page exists.
objects = make([]*data.ExtendedNodeVersion, 0, p.MaxKeys+1)
objects = append(objects, session.Next...)
for obj := range objOutCh {
objects = append(objects, obj)
}
// Drain the error channel after the output channel closes.
if err = <-errorCh; err != nil {
return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
}
sort.Slice(objects, func(i, j int) bool { return objects[i].NodeVersion.FilePath < objects[j].NodeVersion.FilePath })
if len(objects) > p.MaxKeys {
// Overflow element becomes the continuation marker for the next page.
next = objects[p.MaxKeys]
session.Stream = nil
n.putListLatestVersionsSession(ctx, p, session, objects)
objects = objects[:p.MaxKeys]
}
return
}
func (n *layer) getAllObjectsVersions(ctx context.Context, p commonVersionsListingParams) ([]*data.ExtendedNodeVersion, bool, error) {
if p.MaxKeys == 0 {
return nil, false, nil
@ -337,7 +388,7 @@ func (n *layer) initNewVersionsByPrefixSession(ctx context.Context, p commonVers
session.Context = middleware.SetBox(session.Context, &middleware.Box{AccessBox: bd})
}
session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, latestOnly)
session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, "", latestOnly)
if err != nil {
return nil, err
}

View file

@ -292,7 +292,7 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
MD5: hex.EncodeToString(md5Hash),
}
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, partInfo)
oldPartIDNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
if err != nil && !oldPartIDNotFound {
return nil, err
@ -632,7 +632,7 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
return nil, nil, err
}
parts, err := n.treeService.GetParts(ctx, p.Bkt, multipartInfo.ID)
parts, err := n.treeService.GetParts(ctx, p.Bkt, multipartInfo)
if err != nil {
return nil, nil, err
}

View file

@ -316,7 +316,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
newVersion.MD5 = hex.EncodeToString(md5Hash)
}
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
if err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
}

View file

@ -39,7 +39,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
}
}
lockInfo, err := n.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode.ID)
lockInfo, err := n.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode)
if err != nil && !errorsStd.Is(err, tree.ErrNodeNotFound) {
return err
}
@ -92,7 +92,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
}
}
if err = n.treeService.PutLock(ctx, p.ObjVersion.BktInfo, versionNode.ID, lockInfo); err != nil {
if err = n.treeService.PutLock(ctx, p.ObjVersion.BktInfo, versionNode, lockInfo); err != nil {
return fmt.Errorf("couldn't put lock into tree: %w", err)
}
@ -141,7 +141,7 @@ func (n *layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion)
return nil, err
}
lockInfo, err := n.treeService.GetLock(ctx, objVersion.BktInfo, versionNode.ID)
lockInfo, err := n.treeService.GetLock(ctx, objVersion.BktInfo, versionNode)
if err != nil && !errorsStd.Is(err, tree.ErrNodeNotFound) {
return nil, err
}

2
go.mod
View file

@ -37,7 +37,7 @@ require (
google.golang.org/protobuf v1.33.0
)
replace git.frostfs.info/TrueCloudLab/frostfs-sdk-go => ../frostfs-sdk-go
replace git.frostfs.info/TrueCloudLab/frostfs-sdk-go => git.frostfs.info/dkirillov/frostfs-sdk-go v0.0.0-20240621142828-0ee3fcc18ffd
require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect

4
go.sum
View file

@ -44,8 +44,6 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531132048-ebd8fcd1685f h1:vBLC1OSGMSn7lRJv/p1of0veifuBdZdztVrF9Vn+UFk=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531132048-ebd8fcd1685f/go.mod h1:4AObM67VUqkXQJlODTFThFnuMGEuK8h9DrAXHDZqvCU=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a h1:Bk1fB4cQASPKgAVGCdlBOEp5ohZfDxqK6fZM8eP+Emo=
@ -56,6 +54,8 @@ git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjq
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
git.frostfs.info/dkirillov/frostfs-sdk-go v0.0.0-20240621142828-0ee3fcc18ffd h1:tLkZ8XzgDbElZvYHJSAR2FTx2IY0F3Y/FIYWRY3X4rY=
git.frostfs.info/dkirillov/frostfs-sdk-go v0.0.0-20240621142828-0ee3fcc18ffd/go.mod h1:e7H9nNFpx1Tj3R20Zoxy0Vo6Srlb6zV5L7ZQXqg9rn4=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=

View file

@ -15,30 +15,6 @@ import (
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)
type GetNodeByPathResponseInfoWrapper struct {
response *grpcService.GetNodeByPathResponse_Info
}
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
return n.response.GetNodeId()
}
func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
return n.response.GetParentId()
}
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
type GetSubTreeResponseBodyWrapper struct {
response *grpcService.GetSubTreeResponse_Body
}
@ -63,6 +39,87 @@ func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
return res
}
// BurnedListResponseBodyWrapper adapts a gRPC BurnedListResponse_Body
// to the tree.NodeResponse interface.
type BurnedListResponseBodyWrapper struct {
response *grpcService.BurnedListResponse_Body
}
// GetMeta converts the raw gRPC meta entries to tree.Meta values.
func (n BurnedListResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
// GetKey returns the node key from the wrapped response.
func (n BurnedListResponseBodyWrapper) GetKey() string {
return n.response.GetKey()
}
// GetTimestamp returns the node timestamp from the wrapped response.
func (n BurnedListResponseBodyWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
// BurnedGetResponseBodyWrapper adapts a gRPC BurnedGetResponse_Body to the
// tree.NodeResponse interface. The response body carries no key, so the key
// used for the request is stored alongside it.
type BurnedGetResponseBodyWrapper struct {
response *grpcService.BurnedGetResponse_Body
key string
}
// GetMeta converts the raw gRPC meta entries to tree.Meta values.
func (n BurnedGetResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
// GetKey returns the key the node was requested with (not taken from the
// response body, which lacks it).
func (n BurnedGetResponseBodyWrapper) GetKey() string {
return n.key
}
// GetTimestamp returns the node timestamp from the wrapped response.
func (n BurnedGetResponseBodyWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
// BurnedListByPrefixResponseBodyInfoWrapper adapts a gRPC
// BurnedListByPrefixResponse_Body_Info to the tree.NodeResponse interface.
type BurnedListByPrefixResponseBodyInfoWrapper struct {
response *grpcService.BurnedListByPrefixResponse_Body_Info
}
// GetMeta converts the raw gRPC meta entries to tree.Meta values.
func (n BurnedListByPrefixResponseBodyInfoWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
// GetKey returns the node key from the wrapped response.
func (n BurnedListByPrefixResponseBodyInfoWrapper) GetKey() string {
return n.response.GetKey()
}
// GetTimestamp returns the node timestamp from the wrapped response.
func (n BurnedListByPrefixResponseBodyInfoWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
// BurnedGetLatestByPrefixResponseBodyWrapper adapts a gRPC
// BurnedGetLatestByPrefixResponse_Body to the tree.NodeResponse interface.
type BurnedGetLatestByPrefixResponseBodyWrapper struct {
response *grpcService.BurnedGetLatestByPrefixResponse_Body
}
// GetMeta converts the raw gRPC meta entries to tree.Meta values.
func (n BurnedGetLatestByPrefixResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
// GetKey returns the node key from the wrapped response.
func (n BurnedGetLatestByPrefixResponseBodyWrapper) GetKey() string {
return n.response.GetKey()
}
// GetTimestamp returns the node timestamp from the wrapped response.
func (n BurnedGetLatestByPrefixResponseBodyWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
type PoolWrapper struct {
p *treepool.Pool
}
@ -71,77 +128,110 @@ func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
return &PoolWrapper{p: p}
}
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
poolPrm := treepool.GetNodesParams{
CID: prm.BktInfo.CID,
TreeID: prm.TreeID,
Path: prm.Path,
Meta: prm.Meta,
PathAttribute: tree.FileNameKey,
LatestOnly: prm.LatestOnly,
AllAttrs: prm.AllAttrs,
BearerToken: getBearer(ctx, prm.BktInfo),
// BurnedAdd stores (or overwrites) the node identified by key in the given
// tree with the provided meta, forwarding the request to the tree pool with
// the caller's bearer token. Pool errors are normalized via handleError.
// The version returned by the pool is intentionally discarded here.
func (w *PoolWrapper) BurnedAdd(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string, meta map[string]string) error {
_, err := w.p.BurnedAdd(ctx, treepool.BurnedAddParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
Meta: meta,
BearerToken: getBearer(ctx, bktInfo),
})
return handleError(err)
}
nodes, err := w.p.GetNodes(ctx, poolPrm)
// BurnedGet fetches the node identified by key from the given tree and wraps
// the response (together with the requested key, which the response body does
// not carry) as a tree.NodeResponse. Pool errors are normalized via handleError.
func (w *PoolWrapper) BurnedGet(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string) (tree.NodeResponse, error) {
resp, err := w.p.BurnedGet(ctx, treepool.BurnedGetParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
BearerToken: getBearer(ctx, bktInfo),
})
return BurnedGetResponseBodyWrapper{response: resp, key: key}, handleError(err)
}
// BurnedGetLatestByPrefix fetches the latest node whose key matches the given
// prefix and wraps it as a tree.NodeResponse. Pool errors are normalized via
// handleError.
func (w *PoolWrapper) BurnedGetLatestByPrefix(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefix string) (tree.NodeResponse, error) {
resp, err := w.p.BurnedGetLatestByPrefix(ctx, treepool.BurnedGetLatestByPrefixParams{
CID: bktInfo.CID,
TreeID: treeID,
Prefix: prefix,
BearerToken: getBearer(ctx, bktInfo),
})
return BurnedGetLatestByPrefixResponseBodyWrapper{resp}, handleError(err)
}
// BurnedRemove deletes the node identified by key from the given tree.
// Pool errors are normalized via handleError.
func (w *PoolWrapper) BurnedRemove(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string) error {
return handleError(w.p.BurnedRemove(ctx, treepool.BurnedRemoveParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
BearerToken: getBearer(ctx, bktInfo),
}))
}
func (w *PoolWrapper) BurnedListByPrefix(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefix string) ([]tree.NodeResponse, error) {
resp, err := w.p.BurnedListByPrefix(ctx, treepool.BurnedListByPrefixParams{
CID: bktInfo.CID,
TreeID: treeID,
Prefix: prefix,
BearerToken: getBearer(ctx, bktInfo),
})
if err != nil {
return nil, handleError(err)
}
res := make([]tree.NodeResponse, len(nodes))
for i, info := range nodes {
res[i] = GetNodeByPathResponseInfoWrapper{info}
res := make([]tree.NodeResponse, len(resp.GetList()))
for i, info := range resp.GetList() {
res[i] = BurnedListByPrefixResponseBodyInfoWrapper{response: info}
}
return res, nil
}
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
poolPrm := treepool.GetSubTreeParams{
func (w *PoolWrapper) BurnedList(ctx context.Context, bktInfo *data.BucketInfo, treeID, start string) ([]tree.NodeResponse, error) {
cli, err := w.p.BurnedList(ctx, treepool.BurnedListParams{
CID: bktInfo.CID,
TreeID: treeID,
RootID: rootID,
Depth: depth,
Start: start,
BearerToken: getBearer(ctx, bktInfo),
}
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
})
if err != nil {
return nil, handleError(err)
}
var subtree []tree.NodeResponse
var res []tree.NodeResponse
node, err := subTreeReader.Next()
node, err := cli.Next()
for err == nil {
subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
node, err = subTreeReader.Next()
res = append(res, BurnedListResponseBodyWrapper{node})
node, err = cli.Next()
}
if err != nil && err != io.EOF {
return nil, handleError(err)
}
return subtree, nil
return res, nil
}
type SubTreeStreamImpl struct {
r *treepool.SubTreeReader
buffer []*grpcService.GetSubTreeResponse_Body
const bufSize = 1000
type BurnedListStreamImpl struct {
r *treepool.BurnedListReader
buffer []*grpcService.BurnedListResponse_Body
eof bool
index int
ln int
}
const bufSize = 1000
func (s *SubTreeStreamImpl) Next() (tree.NodeResponse, error) {
func (s *BurnedListStreamImpl) Next() (tree.NodeResponse, error) {
if s.index != -1 {
node := s.buffer[s.index]
s.index++
if s.index >= s.ln {
s.index = -1
}
return GetSubTreeResponseBodyWrapper{response: node}, nil
return BurnedListResponseBodyWrapper{response: node}, nil
}
if s.eof {
return nil, io.EOF
@ -162,114 +252,7 @@ func (s *SubTreeStreamImpl) Next() (tree.NodeResponse, error) {
return s.Next()
}
func (w *PoolWrapper) GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) (tree.SubTreeStream, error) {
poolPrm := treepool.GetSubTreeParams{
CID: bktInfo.CID,
TreeID: treeID,
RootID: rootID,
Depth: depth,
BearerToken: getBearer(ctx, bktInfo),
Order: treepool.AscendingOrder,
}
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
return &SubTreeStreamImpl{
r: subTreeReader,
buffer: make([]*grpcService.GetSubTreeResponse_Body, bufSize),
index: -1,
}, nil
}
func (w *PoolWrapper) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error) {
nodeID, err := w.p.AddNode(ctx, treepool.AddNodeParams{
CID: bktInfo.CID,
TreeID: treeID,
Parent: parent,
Meta: meta,
BearerToken: getBearer(ctx, bktInfo),
})
return nodeID, handleError(err)
}
func (w *PoolWrapper) AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
nodeID, err := w.p.AddNodeByPath(ctx, treepool.AddNodeByPathParams{
CID: bktInfo.CID,
TreeID: treeID,
Path: path,
Meta: meta,
PathAttribute: tree.FileNameKey,
BearerToken: getBearer(ctx, bktInfo),
})
return nodeID, handleError(err)
}
func (w *PoolWrapper) MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
return handleError(w.p.MoveNode(ctx, treepool.MoveNodeParams{
CID: bktInfo.CID,
TreeID: treeID,
NodeID: nodeID,
ParentID: parentID,
Meta: meta,
BearerToken: getBearer(ctx, bktInfo),
}))
}
func (w *PoolWrapper) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
return handleError(w.p.RemoveNode(ctx, treepool.RemoveNodeParams{
CID: bktInfo.CID,
TreeID: treeID,
NodeID: nodeID,
BearerToken: getBearer(ctx, bktInfo),
}))
}
func (w *PoolWrapper) BurnedAddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string, meta map[string]string) ([]byte, error) {
resp, err := w.p.BurnedAdd(ctx, treepool.BurnedAddParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
Meta: meta,
BearerToken: getBearer(ctx, bktInfo),
})
return resp.GetVersion(), handleError(err)
}
func (w *PoolWrapper) BurnedGetNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string, version []byte) ([]*grpcService.KeyValue, error) {
resp, err := w.p.BurnedGet(ctx, treepool.BurnedGetParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
Version: version,
BearerToken: getBearer(ctx, bktInfo),
})
return resp.GetMeta(), handleError(err)
}
func (w *PoolWrapper) BurnedRemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string) error {
return handleError(w.p.BurnedRemove(ctx, treepool.BurnedRemoveParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
BearerToken: getBearer(ctx, bktInfo),
}))
}
func (w *PoolWrapper) BurnedListVersions(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string) ([]*grpcService.BurnedListVersionsResponse_Body_Info, error) {
resp, err := w.p.BurnedListVersions(ctx, treepool.BurnedListVersionsParams{
CID: bktInfo.CID,
TreeID: treeID,
Key: key,
BearerToken: getBearer(ctx, bktInfo),
})
return resp.GetList(), handleError(err)
}
func (w *PoolWrapper) BurnedList(ctx context.Context, bktInfo *data.BucketInfo, treeID, start string) ([]*grpcService.BurnedListResponse_Body, error) {
func (w *PoolWrapper) BurnedListStream(ctx context.Context, bktInfo *data.BucketInfo, treeID, start string) (tree.SubTreeStream, error) {
cli, err := w.p.BurnedList(ctx, treepool.BurnedListParams{
CID: bktInfo.CID,
TreeID: treeID,
@ -280,8 +263,11 @@ func (w *PoolWrapper) BurnedList(ctx context.Context, bktInfo *data.BucketInfo,
return nil, handleError(err)
}
res, err := cli.ReadAll()
return res, handleError(err)
return &BurnedListStreamImpl{
r: cli,
buffer: make([]*grpcService.BurnedListResponse_Body, bufSize),
index: -1,
}, nil
}
func getBearer(ctx context.Context, bktInfo *data.BucketInfo) []byte {

File diff suppressed because it is too large Load diff

View file

@ -2,14 +2,13 @@ package tree
import (
"context"
"fmt"
"io"
"sort"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
"golang.org/x/exp/slices"
)
type nodeMeta struct {
@ -27,17 +26,8 @@ func (m nodeMeta) GetValue() []byte {
type nodeResponse struct {
meta []nodeMeta
nodeID uint64
parentID uint64
timestamp uint64
}
func (n nodeResponse) GetNodeID() uint64 {
return n.nodeID
}
func (n nodeResponse) GetParentID() uint64 {
return n.parentID
key string
}
func (n nodeResponse) GetTimestamp() uint64 {
@ -49,16 +39,12 @@ func (n nodeResponse) GetMeta() []Meta {
for i, value := range n.meta {
res[i] = value
}
return res
return append(res, nodeMeta{key: "timestamp", value: []byte(strconv.FormatUint(n.timestamp, 10))})
}
func (n nodeResponse) getValue(key string) string {
for _, value := range n.meta {
if value.key == key {
return string(value.value)
}
}
return ""
func (n nodeResponse) GetKey() string {
return n.key
}
type ServiceClientMemory struct {
@ -67,117 +53,7 @@ type ServiceClientMemory struct {
type containerInfo struct {
bkt *data.BucketInfo
trees map[string]memoryTree
}
type memoryTree struct {
idCounter uint64
treeData *treeNodeMemory
}
type treeNodeMemory struct {
data nodeResponse
parent *treeNodeMemory
children []*treeNodeMemory
}
func (t *treeNodeMemory) getNode(nodeID uint64) *treeNodeMemory {
if t.data.nodeID == nodeID {
return t
}
for _, child := range t.children {
if node := child.getNode(nodeID); node != nil {
return node
}
}
return nil
}
func (t *memoryTree) getNodesByPath(path []string) []nodeResponse {
if len(path) == 0 {
return nil
}
var res []nodeResponse
for _, child := range t.treeData.children {
res = child.listNodesByPath(res, path)
}
return res
}
func (t *treeNodeMemory) listNodesByPath(res []nodeResponse, path []string) []nodeResponse {
if len(path) == 0 || t.data.getValue(FileNameKey) != path[0] {
return res
}
if len(path) == 1 {
return append(res, t.data)
}
for _, ch := range t.children {
res = ch.listNodesByPath(res, path[1:])
}
return res
}
func (t *memoryTree) createPathIfNotExist(parent *treeNodeMemory, path []string) *treeNodeMemory {
if len(path) == 0 {
return parent
}
var node *treeNodeMemory
for _, child := range parent.children {
if len(child.data.meta) == 1 && child.data.getValue(FileNameKey) == path[0] {
node = child
break
}
}
if node == nil {
node = &treeNodeMemory{
data: nodeResponse{
meta: []nodeMeta{{key: FileNameKey, value: []byte(path[0])}},
nodeID: t.idCounter,
parentID: parent.data.nodeID,
timestamp: uint64(time.Now().UnixMicro()),
},
parent: parent,
}
t.idCounter++
parent.children = append(parent.children, node)
}
return t.createPathIfNotExist(node, path[1:])
}
func (t *treeNodeMemory) removeChild(nodeID uint64) {
ind := -1
for i, ch := range t.children {
if ch.data.nodeID == nodeID {
ind = i
break
}
}
if ind != -1 {
t.children = append(t.children[:ind], t.children[ind+1:]...)
}
}
func (t *treeNodeMemory) listNodes(res []NodeResponse, depth uint32) []NodeResponse {
res = append(res, t.data)
if depth == 0 {
return res
}
for _, ch := range t.children {
res = ch.listNodes(res, depth-1)
}
return res
trees map[string]map[string]*nodeResponse
}
var _ ServiceClient = (*ServiceClientMemory)(nil)
@ -188,72 +64,121 @@ func NewTreeServiceClientMemory() (*ServiceClientMemory, error) {
}, nil
}
type nodeResponseWrapper struct {
nodeResponse
allAttr bool
attrs []string
}
func (n nodeResponseWrapper) GetMeta() []Meta {
res := make([]Meta, 0, len(n.meta))
for _, value := range n.meta {
if n.allAttr || slices.Contains(n.attrs, value.key) {
res = append(res, value)
}
}
return res
}
func (c *ServiceClientMemory) GetNodes(_ context.Context, p *GetNodesParams) ([]NodeResponse, error) {
cnr, ok := c.containers[p.BktInfo.CID.EncodeToString()]
if !ok {
return nil, nil
}
tr, ok := cnr.trees[p.TreeID]
if !ok {
return nil, nil
}
res := tr.getNodesByPath(p.Path)
sort.Slice(res, func(i, j int) bool {
return res[i].timestamp < res[j].timestamp
})
if p.LatestOnly && len(res) != 0 {
res = res[len(res)-1:]
}
res2 := make([]NodeResponse, len(res))
for i, n := range res {
res2[i] = nodeResponseWrapper{
nodeResponse: n,
allAttr: p.AllAttrs,
attrs: p.Meta,
// newContainerInfo builds an in-memory container entry for bktInfo with a
// single empty key->node map pre-created for treeID.
func newContainerInfo(bktInfo *data.BucketInfo, treeID string) containerInfo {
return containerInfo{
bkt: bktInfo,
trees: map[string]map[string]*nodeResponse{
treeID: {},
},
}
}
return res2, nil
}
func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]NodeResponse, error) {
func (c *ServiceClientMemory) BurnedAdd(_ context.Context, bktInfo *data.BucketInfo, treeID string, key string, meta map[string]string) error {
cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
if !ok {
return nil, nil
cnr = newContainerInfo(bktInfo, treeID)
c.containers[bktInfo.CID.EncodeToString()] = cnr
}
tr, ok := cnr.trees[treeID]
if !ok {
tr = make(map[string]*nodeResponse)
}
tr[key] = &nodeResponse{
meta: metaToNodeMeta(meta),
key: key,
timestamp: uint64(time.Now().UnixMicro()),
}
cnr.trees[treeID] = tr
return nil
}
func (c *ServiceClientMemory) BurnedGet(_ context.Context, bktInfo *data.BucketInfo, treeID string, key string) (NodeResponse, error) {
cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
if !ok {
return nil, ErrNodeNotFound
}
node := tr.treeData.getNode(rootID)
if node == nil {
node, ok := cnr.trees[treeID][key]
if !ok {
return nil, ErrNodeNotFound
}
// we depth-1 in case of uint32 and 0 as mark to get all subtree leads to overflow and depth is getting quite big to walk all tree levels
return node.listNodes(nil, depth-1), nil
return node, nil
}
// BurnedGetLatestByPrefix scans the in-memory tree for keys with the given
// prefix and returns the node with the greatest timestamp, or ErrNodeNotFound
// when the container is unknown or no key matches.
func (c *ServiceClientMemory) BurnedGetLatestByPrefix(_ context.Context, bktInfo *data.BucketInfo, treeID string, prefix string) (NodeResponse, error) {
cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
if !ok {
return nil, ErrNodeNotFound
}
var res *nodeResponse
for k, val := range cnr.trees[treeID] {
if strings.HasPrefix(k, prefix) {
// Keep the candidate with the newest timestamp.
if res == nil || res.timestamp < val.timestamp {
res = val
}
}
}
if res == nil {
return nil, ErrNodeNotFound
}
return res, nil
}
// BurnedRemove deletes the node stored under key from the given tree.
// ErrNodeNotFound is returned when the container or the tree does not exist;
// deleting a key that is already absent is a no-op.
func (c *ServiceClientMemory) BurnedRemove(_ context.Context, bktInfo *data.BucketInfo, treeID string, key string) error {
	cnr, found := c.containers[bktInfo.CID.EncodeToString()]
	if !found {
		return ErrNodeNotFound
	}

	nodes, found := cnr.trees[treeID]
	if !found {
		return ErrNodeNotFound
	}

	delete(nodes, key)
	return nil
}
// BurnedListByPrefix returns every node whose key starts with prefix, ordered
// by key ascending. ErrNodeNotFound is returned only when the bucket's
// container is unknown; an empty match set yields a nil slice and no error.
func (c *ServiceClientMemory) BurnedListByPrefix(_ context.Context, bktInfo *data.BucketInfo, treeID string, prefix string) ([]NodeResponse, error) {
	cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
	if !ok {
		return nil, ErrNodeNotFound
	}

	var matched []NodeResponse
	for key, node := range cnr.trees[treeID] {
		if strings.HasPrefix(key, prefix) {
			matched = append(matched, node)
		}
	}

	// Map iteration order is random; sort for a deterministic listing.
	sort.Slice(matched, func(i, j int) bool {
		return matched[i].GetKey() < matched[j].GetKey()
	})

	return matched, nil
}
// BurnedList returns all nodes of the given tree whose key is strictly
// greater than start, ordered by key ascending. A nil slice is returned when
// nothing follows start.
//
// NOTE(review): the start bound is exclusive here (GetKey() > start) —
// confirm callers do not expect the node at start itself to be included.
func (c *ServiceClientMemory) BurnedList(ctx context.Context, bktInfo *data.BucketInfo, treeID, start string) ([]NodeResponse, error) {
	// Reuse prefix listing with an empty prefix to obtain all keys, sorted.
	res, err := c.BurnedListByPrefix(ctx, bktInfo, treeID, "")
	if err != nil {
		return nil, err
	}
	// res is sorted by key, so the first key greater than start marks the
	// beginning of the requested suffix.
	for i := range res {
		if res[i].GetKey() > start {
			return res[i:], nil
		}
	}
	return nil, nil
}
type SubTreeStreamMemoryImpl struct {
@ -273,230 +198,20 @@ func (s *SubTreeStreamMemoryImpl) Next() (NodeResponse, error) {
return s.res[s.offset-1], nil
}
func (c *ServiceClientMemory) GetSubTreeStream(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) (SubTreeStream, error) {
cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
if !ok {
return &SubTreeStreamMemoryImpl{err: ErrNodeNotFound}, nil
func (c *ServiceClientMemory) BurnedListStream(ctx context.Context, bktInfo *data.BucketInfo, treeID, start string) (SubTreeStream, error) {
res, err := c.BurnedListByPrefix(ctx, bktInfo, treeID, "")
if err != nil {
return nil, err
}
tr, ok := cnr.trees[treeID]
if !ok {
return nil, ErrNodeNotFound
}
node := tr.treeData.getNode(rootID)
if node == nil {
return nil, ErrNodeNotFound
}
sortNode(tr.treeData)
return &SubTreeStreamMemoryImpl{
res: node.listNodes(nil, depth-1),
offset: 0,
}, nil
}
func newContainerInfo(bktInfo *data.BucketInfo, treeID string) containerInfo {
return containerInfo{
bkt: bktInfo,
trees: map[string]memoryTree{
treeID: {
idCounter: 1,
treeData: &treeNodeMemory{
data: nodeResponse{
timestamp: uint64(time.Now().UnixMicro()),
},
},
},
},
for i, item := range res {
if start <= item.GetKey() {
res = res[i:]
break
}
}
// newMemoryTree creates an empty in-memory tree consisting of a synthetic
// root node stamped with the current time. idCounter starts at 1, so the
// root's implicit id 0 is never handed out to a child node.
func newMemoryTree() memoryTree {
	return memoryTree{
		idCounter: 1,
		treeData: &treeNodeMemory{
			data: nodeResponse{
				timestamp: uint64(time.Now().UnixMicro()),
			},
		},
	}
}
// AddNode creates a child of parent in the given tree and returns the id
// assigned to it. It delegates to AddNodeBase with sibling sorting enabled.
func (c *ServiceClientMemory) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error) {
	return c.AddNodeBase(ctx, bktInfo, treeID, parent, meta, true)
}
// AddNodeBase creates a new node carrying meta as a child of the node with id
// parent in the bucket's tree, creating the container and the tree lazily.
// When needSort is true the parent's children are re-sorted by their FileName
// attribute after insertion. It returns the id assigned to the new node, or
// ErrNodeNotFound when parent does not exist in the tree.
func (c *ServiceClientMemory) AddNodeBase(_ context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string, needSort bool) (uint64, error) {
	cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
	if !ok {
		cnr = newContainerInfo(bktInfo, treeID)
		c.containers[bktInfo.CID.EncodeToString()] = cnr
	}
	tr, ok := cnr.trees[treeID]
	if !ok {
		tr = newMemoryTree()
		cnr.trees[treeID] = tr
	}
	parentNode := tr.treeData.getNode(parent)
	if parentNode == nil {
		return 0, ErrNodeNotFound
	}
	newID := tr.idCounter
	tr.idCounter++
	tn := &treeNodeMemory{
		data: nodeResponse{
			meta:      metaToNodeMeta(meta),
			nodeID:    newID,
			parentID:  parent,
			timestamp: uint64(time.Now().UnixMicro()),
		},
		parent: parentNode,
	}
	parentNode.children = append(parentNode.children, tn)
	if needSort {
		sortNodes(parentNode.children)
	}
	// tr is a value copy out of the map (newMemoryTree returns a value), so
	// the idCounter increment above must be written back to take effect.
	cnr.trees[treeID] = tr
	return newID, nil
}
// AddNodeByPath creates a new node carrying meta under the directory path
// (creating intermediate path nodes as needed) in the bucket's tree, creating
// the container and the tree lazily. It returns the id assigned to the new
// node. Unlike AddNodeBase, siblings are not re-sorted after insertion.
func (c *ServiceClientMemory) AddNodeByPath(_ context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
	cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
	if !ok {
		cnr = newContainerInfo(bktInfo, treeID)
		c.containers[bktInfo.CID.EncodeToString()] = cnr
	}
	tr, ok := cnr.trees[treeID]
	if !ok {
		tr = newMemoryTree()
		cnr.trees[treeID] = tr
	}
	parentNode := tr.createPathIfNotExist(tr.treeData, path)
	if parentNode == nil {
		return 0, fmt.Errorf("create path '%s'", path)
	}
	newID := tr.idCounter
	tr.idCounter++
	tn := &treeNodeMemory{
		data: nodeResponse{
			meta:      metaToNodeMeta(meta),
			nodeID:    newID,
			parentID:  parentNode.data.nodeID,
			timestamp: uint64(time.Now().UnixMicro()),
		},
		parent: parentNode,
	}
	parentNode.children = append(parentNode.children, tn)
	// tr is a value copy; write it back so the idCounter bump is persisted.
	cnr.trees[treeID] = tr
	return newID, nil
}
// MoveNode reparents the node with nodeID under parentID in the given tree
// and replaces the node's meta. ErrNodeNotFound is returned when the
// container, the tree, the node or the new parent cannot be found.
//
// Fix: the node's parent back-pointer was never updated after reparenting,
// so a later RemoveNode (which detaches via node.parent.removeChild) would
// operate on the old parent and silently fail to remove the node. Detaching
// is also done before attaching so a move within the same parent does not
// drop the node it just appended.
func (c *ServiceClientMemory) MoveNode(_ context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
	cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
	if !ok {
		return ErrNodeNotFound
	}

	tr, ok := cnr.trees[treeID]
	if !ok {
		return ErrNodeNotFound
	}

	node := tr.treeData.getNode(nodeID)
	if node == nil {
		return ErrNodeNotFound
	}

	newParent := tr.treeData.getNode(parentID)
	if newParent == nil {
		return ErrNodeNotFound
	}

	node.data.meta = metaToNodeMeta(meta)
	node.data.parentID = parentID

	node.parent.removeChild(nodeID)
	newParent.children = append(newParent.children, node)
	node.parent = newParent

	return nil
}
// sortNode recursively orders the entire subtree rooted at node so that
// every child list is sorted by the FileName attribute. A nil node is a
// no-op.
func sortNode(node *treeNodeMemory) {
	if node == nil {
		return
	}

	sortNodes(node.children)
	for i := range node.children {
		sortNode(node.children[i])
	}
}
// sortNodes orders sibling nodes in place by their FileName attribute.
func sortNodes(list []*treeNodeMemory) {
	byFileName := func(i, j int) bool {
		return list[i].data.getValue(FileNameKey) < list[j].data.getValue(FileNameKey)
	}
	sort.Slice(list, byFileName)
}
// RemoveNode detaches the node with nodeID from its parent in the given
// tree. ErrNodeNotFound is returned when the container, the tree or the node
// itself is missing.
func (c *ServiceClientMemory) RemoveNode(_ context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
	cnr, found := c.containers[bktInfo.CID.EncodeToString()]
	if !found {
		return ErrNodeNotFound
	}

	tree, found := cnr.trees[treeID]
	if !found {
		return ErrNodeNotFound
	}

	target := tree.treeData.getNode(nodeID)
	if target == nil {
		return ErrNodeNotFound
	}

	target.parent.removeChild(nodeID)
	return nil
}
// BurnedAddNode is not implemented for the in-memory client; calling it
// panics.
func (c *ServiceClientMemory) BurnedAddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string, meta map[string]string) ([]byte, error) {
	//TODO implement me
	panic("implement me")
}
// BurnedGetNode is not implemented for the in-memory client; calling it
// panics.
func (c *ServiceClientMemory) BurnedGetNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string, version []byte) ([]*grpcService.KeyValue, error) {
	//TODO implement me
	panic("implement me")
}
// BurnedRemoveNode is not implemented for the in-memory client; calling it
// panics.
func (c *ServiceClientMemory) BurnedRemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string) error {
	//TODO implement me
	panic("implement me")
}
// BurnedListVersions is not implemented for the in-memory client; calling it
// panics.
func (c *ServiceClientMemory) BurnedListVersions(ctx context.Context, bktInfo *data.BucketInfo, treeID string, key string) ([]*grpcService.BurnedListVersionsResponse_Body_Info, error) {
	//TODO implement me
	panic("implement me")
}
func (c *ServiceClientMemory) BurnedList(ctx context.Context, bktInfo *data.BucketInfo, treeID, start string) ([]*grpcService.BurnedListResponse_Body, error) {
//TODO implement me
panic("implement me")
return &SubTreeStreamMemoryImpl{res: res}, nil
}
func metaToNodeMeta(m map[string]string) []nodeMeta {

View file

@ -163,12 +163,11 @@ func TestTreeServiceAddVersion(t *testing.T) {
IsUnversioned: true,
}
nodeID, err := treeService.AddVersion(ctx, bktInfo, version)
err = treeService.AddVersion(ctx, bktInfo, version)
require.NoError(t, err)
storedNode, err := treeService.GetUnversioned(ctx, bktInfo, "path/to/version")
require.NoError(t, err)
require.Equal(t, nodeID, storedNode.ID)
require.Equal(t, version.BaseNodeVersion.Size, storedNode.Size)
require.Equal(t, version.BaseNodeVersion.ETag, storedNode.ETag)
require.Equal(t, version.BaseNodeVersion.ETag, storedNode.ETag)
@ -185,7 +184,7 @@ func TestGetLatestNode(t *testing.T) {
for _, tc := range []struct {
name string
nodes []NodeResponse
expectedNodeID uint64
exceptedTimestamp uint64
error bool
}{
{
@ -197,8 +196,6 @@ func TestGetLatestNode(t *testing.T) {
name: "one node of the object version",
nodes: []NodeResponse{
nodeResponse{
nodeID: 1,
parentID: 0,
timestamp: 1,
meta: []nodeMeta{
{
@ -208,20 +205,16 @@ func TestGetLatestNode(t *testing.T) {
},
},
},
expectedNodeID: 1,
exceptedTimestamp: 1,
},
{
name: "one node of the object version and one node of the secondary object",
nodes: []NodeResponse{
nodeResponse{
nodeID: 2,
parentID: 0,
timestamp: 3,
meta: []nodeMeta{},
},
nodeResponse{
nodeID: 1,
parentID: 0,
timestamp: 1,
meta: []nodeMeta{
{
@ -231,20 +224,16 @@ func TestGetLatestNode(t *testing.T) {
},
},
},
expectedNodeID: 1,
exceptedTimestamp: 1,
},
{
name: "all nodes represent a secondary object",
nodes: []NodeResponse{
nodeResponse{
nodeID: 2,
parentID: 0,
timestamp: 3,
meta: []nodeMeta{},
},
nodeResponse{
nodeID: 4,
parentID: 0,
timestamp: 5,
meta: []nodeMeta{},
},
@ -255,8 +244,6 @@ func TestGetLatestNode(t *testing.T) {
name: "several nodes of different types and with different timestamp",
nodes: []NodeResponse{
nodeResponse{
nodeID: 1,
parentID: 0,
timestamp: 1,
meta: []nodeMeta{
{
@ -266,14 +253,10 @@ func TestGetLatestNode(t *testing.T) {
},
},
nodeResponse{
nodeID: 3,
parentID: 0,
timestamp: 3,
meta: []nodeMeta{},
},
nodeResponse{
nodeID: 4,
parentID: 0,
timestamp: 4,
meta: []nodeMeta{
{
@ -283,13 +266,11 @@ func TestGetLatestNode(t *testing.T) {
},
},
nodeResponse{
nodeID: 6,
parentID: 0,
timestamp: 6,
meta: []nodeMeta{},
},
},
expectedNodeID: 4,
exceptedTimestamp: 4,
},
} {
t.Run(tc.name, func(t *testing.T) {
@ -298,9 +279,13 @@ func TestGetLatestNode(t *testing.T) {
require.Error(t, err)
return
}
require.NoError(t, err)
tNode, err := newTreeNode(actualNode)
require.NoError(t, err)
require.NoError(t, err)
require.Equal(t, tc.expectedNodeID, actualNode.GetNodeID())
require.EqualValues(t, tc.exceptedTimestamp, tNode.TimeStamp)
})
}
}