package tree

import (
    "context"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "sort"
    "strconv"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "go.uber.org/zap"
    "golang.org/x/exp/maps"
)

type (
    Tree struct {
        service ServiceClient
        log     *zap.Logger
    }

    // ServiceClient is a client to interact with tree service.
    // Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
    ServiceClient interface {
        GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
        GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
        GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) (SubTreeStream, error)
        AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error)
        AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error)
        MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error
        RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error
    }

    SubTreeStream interface {
        Next() (NodeResponse, error)
    }

    treeNode struct {
        ID        []uint64
        ParentID  []uint64
        ObjID     oid.ID
        TimeStamp []uint64
        Size      uint64
        Meta      map[string]string
    }

    multiSystemNode struct {
        // the first element is latest
        nodes []*treeNode
    }

    GetNodesParams struct {
        BktInfo    *data.BucketInfo
        TreeID     string
        Path       []string
        Meta       []string
        LatestOnly bool
        AllAttrs   bool
    }
)

const (
    FileNameKey = "FileName"
)

var (
    // ErrNodeNotFound is returned from ServiceClient in case of not found error.
    ErrNodeNotFound = tree.ErrNodeNotFound

    // ErrNodeAccessDenied is returned from ServiceClient in case of access denied error.
    ErrNodeAccessDenied = tree.ErrNodeAccessDenied

    // ErrGatewayTimeout is returned from ServiceClient in case of timeout error.
    ErrGatewayTimeout = frostfs.ErrGatewayTimeout

    errNodeDoesntContainFileName = fmt.Errorf("node doesn't contain FileName")
)

const (
    versioningKV        = "Versioning"
    cannedACLKV         = "cannedACL"
    ownerKeyKV          = "ownerKey"
    lockConfigurationKV = "LockConfiguration"
    oidKV               = "OID"
    cidKV               = "CID"

    isCombinedKV    = "IsCombined"
    isUnversionedKV = "IsUnversioned"
    isTagKV         = "IsTag"
    uploadIDKV      = "UploadId"
    partNumberKV    = "Number"
    sizeKV          = "Size"
    etagKV          = "ETag"
    md5KV           = "MD5"
    finishedKV      = "Finished"
    creationEpochKV = "CreationEpoch"

    // keys for lock.
    isLockKV       = "IsLock"
    legalHoldOIDKV = "LegalHoldOID"
    retentionOIDKV = "RetentionOID"
    untilDateKV    = "UntilDate"
    isComplianceKV = "IsCompliance"

    // keys for delete marker nodes.
    isDeleteMarkerKV = "IsDeleteMarker"
    ownerKV          = "Owner"
    createdKV        = "Created"

    settingsFileName        = "bucket-settings"
    corsFilename            = "bucket-cors"
    bucketTaggingFilename   = "bucket-tagging"
    bucketLifecycleFilename = "bucket-lifecycle"

    // versionTree -- ID of a tree with object versions.
    versionTree = "version"

    // systemTree -- ID of a tree with system objects,
    // i.e. bucket settings with versioning and lock configuration, cors.
    systemTree = "system"

    separator            = "/"
    userDefinedTagPrefix = "User-Tag-"

    maxGetSubTreeDepth = 0 // means all subTree
)

// NewTree creates an instance of Tree using the provided tree service client and logger.
func NewTree(service ServiceClient, log *zap.Logger) *Tree {
    return &Tree{
        service: service,
        log:     log,
    }
}
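
// Meta is a key-value entry of tree node metadata.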
type Meta interface {
    GetKey() string
    GetValue() []byte
}
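
// NodeResponse is a minimal representation of a tree service node returned by ServiceClient.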
type NodeResponse interface {
    GetMeta() []Meta
    GetNodeID() []uint64
    GetParentID() []uint64
    GetTimestamp() []uint64
}

func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
    if err := validateNodeResponse(nodeInfo); err != nil {
        return nil, err
    }

    tNode := &treeNode{
        ID:        nodeInfo.GetNodeID(),
        ParentID:  nodeInfo.GetParentID(),
        TimeStamp: nodeInfo.GetTimestamp(),
        Meta:      make(map[string]string, len(nodeInfo.GetMeta())),
    }

    for _, kv := range nodeInfo.GetMeta() {
        switch kv.GetKey() {
        case oidKV:
            if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
                return nil, err
            }
        case sizeKV:
            if sizeStr := string(kv.GetValue()); len(sizeStr) > 0 {
                var err error
                if tNode.Size, err = strconv.ParseUint(sizeStr, 10, 64); err != nil {
                    return nil, fmt.Errorf("invalid size value '%s': %w", sizeStr, err)
                }
            }
        default:
            tNode.Meta[kv.GetKey()] = string(kv.GetValue())
        }
    }

    return tNode, nil
}

func (n *treeNode) Get(key string) (string, bool) {
    value, ok := n.Meta[key]
    return value, ok
}

func (n *treeNode) FileName() (string, bool) {
    value, ok := n.Meta[FileNameKey]
    return value, ok
}

func (n *treeNode) IsSplit() bool {
    return len(n.ID) != 1 || len(n.ParentID) != 1 || len(n.TimeStamp) != 1
}

func (n *treeNode) GetLatestNodeIndex() int {
    var (
        maxTimestamp uint64
        index        int
    )

    for i, timestamp := range n.TimeStamp {
        if timestamp > maxTimestamp {
            maxTimestamp = timestamp
            index = i
        }
    }

    return index
}

func newNodeVersion(log *zap.Logger, filePath string, node NodeResponse) (*data.NodeVersion, error) {
    tNode, err := newTreeNode(node)
    if err != nil {
        return nil, fmt.Errorf("invalid tree node: %w", err)
    }

    return newNodeVersionFromTreeNode(log, filePath, tNode)
}

func newNodeVersionFromTreeNode(log *zap.Logger, filePath string, treeNode *treeNode) (*data.NodeVersion, error) {
    _, isUnversioned := treeNode.Get(isUnversionedKV)
    _, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
    _, isCombined := treeNode.Get(isCombinedKV)
    eTag, _ := treeNode.Get(etagKV)
    md5, _ := treeNode.Get(md5KV)

    if treeNode.IsSplit() {
        return nil, errors.New("invalid version tree node: this is split node")
    }

    version := &data.NodeVersion{
        BaseNodeVersion: data.BaseNodeVersion{
            ID:             treeNode.ID[0],
            ParenID:        treeNode.ParentID[0],
            OID:            treeNode.ObjID,
            Timestamp:      treeNode.TimeStamp[0],
            ETag:           eTag,
            MD5:            md5,
            Size:           treeNode.Size,
            FilePath:       filePath,
            IsDeleteMarker: isDeleteMarker,
        },
        IsUnversioned: isUnversioned,
        IsCombined:    isCombined,
    }

    if createdStr, ok := treeNode.Get(createdKV); ok {
        if utcMilli, err := strconv.ParseInt(createdStr, 10, 64); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(createdKV, createdStr), zap.Error(err))
        } else {
            created := time.UnixMilli(utcMilli)
            version.Created = &created
        }
    }

    if ownerStr, ok := treeNode.Get(ownerKV); ok {
        var owner user.ID
        if err := owner.DecodeString(ownerStr); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerStr), zap.Error(err))
        } else {
            version.Owner = &owner
        }
    }

    if creationEpoch, ok := treeNode.Get(creationEpochKV); ok {
        if epoch, err := strconv.ParseUint(creationEpoch, 10, 64); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err))
        } else {
            version.CreationEpoch = epoch
        }
    }

    return version, nil
}

func newMultiNode(nodes []NodeResponse) (*multiSystemNode, error) {
    var (
        err          error
        index        int
        maxTimestamp uint64
    )

    if len(nodes) == 0 {
        return nil, errors.New("multi node must have at least one node")
    }

    treeNodes := make([]*treeNode, len(nodes))

    for i, node := range nodes {
        if treeNodes[i], err = newTreeNode(node); err != nil {
            return nil, fmt.Errorf("parse system node response: %w", err)
        }

        if timestamp := getMaxTimestamp(node); timestamp > maxTimestamp {
            index = i
            maxTimestamp = timestamp
        }
    }

    treeNodes[0], treeNodes[index] = treeNodes[index], treeNodes[0]

    return &multiSystemNode{
        nodes: treeNodes,
    }, nil
}

func (m *multiSystemNode) Latest() *treeNode {
    return m.nodes[0]
}

func (m *multiSystemNode) Old() []*treeNode {
    return m.nodes[1:]
}

func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *treeNode) (*data.MultipartInfo, error) {
    uploadID, _ := treeNode.Get(uploadIDKV)
    if uploadID == "" {
        return nil, fmt.Errorf("it's not a multipart node: missing UploadId")
    }

    if treeNode.IsSplit() {
        return nil, fmt.Errorf("invalid multipart node '%s': tree node is split", filePath)
    }

    multipartInfo := &data.MultipartInfo{
        ID:       treeNode.ID[0],
        Key:      filePath,
        UploadID: uploadID,
        Meta:     treeNode.Meta,
    }

    if ownerID, ok := treeNode.Get(ownerKV); ok {
        if err := multipartInfo.Owner.DecodeString(ownerID); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, ownerID), zap.Error(err))
        }
    }

    if created, ok := treeNode.Get(createdKV); ok {
        if utcMilli, err := strconv.ParseInt(created, 10, 64); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(createdKV, created), zap.Error(err))
        } else {
            multipartInfo.Created = time.UnixMilli(utcMilli)
        }
    }

    if finished, ok := treeNode.Get(finishedKV); ok {
        if flag, err := strconv.ParseBool(finished); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, finished), zap.Error(err))
        } else {
            multipartInfo.Finished = flag
        }
    }

    if creationEpoch, ok := treeNode.Get(creationEpochKV); ok {
        if epoch, err := strconv.ParseUint(creationEpoch, 10, 64); err != nil {
            log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, creationEpoch), zap.Error(err))
        } else {
            multipartInfo.CreationEpoch = epoch
        }
    }

    return multipartInfo, nil
}

func newMultipartInfo(log *zap.Logger, node NodeResponse) (*data.MultipartInfo, error) {
    if err := validateNodeResponse(node); err != nil {
        return nil, err
    }

    if len(node.GetNodeID()) != 1 {
        return nil, errors.New("invalid multipart node: this is split node")
    }

    multipartInfo := &data.MultipartInfo{
        ID:   node.GetNodeID()[0],
        Meta: make(map[string]string, len(node.GetMeta())),
    }

    for _, kv := range node.GetMeta() {
        switch kv.GetKey() {
        case uploadIDKV:
            multipartInfo.UploadID = string(kv.GetValue())
        case FileNameKey:
            multipartInfo.Key = string(kv.GetValue())
        case createdKV:
            if utcMilli, err := strconv.ParseInt(string(kv.GetValue()), 10, 64); err != nil {
                log.Warn(logs.InvalidTreeKV, zap.String(createdKV, string(kv.GetValue())), zap.Error(err))
            } else {
                multipartInfo.Created = time.UnixMilli(utcMilli)
            }
        case ownerKV:
            if err := multipartInfo.Owner.DecodeString(string(kv.GetValue())); err != nil {
                log.Warn(logs.InvalidTreeKV, zap.String(ownerKV, string(kv.GetValue())), zap.Error(err))
            }
        case finishedKV:
            if isFinished, err := strconv.ParseBool(string(kv.GetValue())); err != nil {
                log.Warn(logs.InvalidTreeKV, zap.String(finishedKV, string(kv.GetValue())), zap.Error(err))
            } else {
                multipartInfo.Finished = isFinished
            }
        case creationEpochKV:
            if epoch, err := strconv.ParseUint(string(kv.GetValue()), 10, 64); err != nil {
                log.Warn(logs.InvalidTreeKV, zap.String(creationEpochKV, string(kv.GetValue())), zap.Error(err))
            } else {
                multipartInfo.CreationEpoch = epoch
            }
        default:
            multipartInfo.Meta[kv.GetKey()] = string(kv.GetValue())
        }
    }

    if multipartInfo.UploadID == "" {
        return nil, fmt.Errorf("it's not a multipart node")
    }

    return multipartInfo, nil
}

func validateNodeResponse(node NodeResponse) error {
    ids := node.GetNodeID()
    parentIDs := node.GetParentID()
    timestamps := node.GetTimestamp()

    if len(ids) == 0 || len(parentIDs) == 0 || len(timestamps) == 0 {
        return errors.New("invalid node response: missing ids")
    }

    if len(ids) != len(parentIDs) || len(parentIDs) != len(timestamps) {
        return errors.New("invalid node response: multiple ids length mismatch")
    }

    return nil
}

func newPartInfo(node NodeResponse) (*data.PartInfoExtended, error) {
    if err := validateNodeResponse(node); err != nil {
        return nil, err
    }

    if len(node.GetNodeID()) != 1 {
        return nil, errors.New("invalid part node: this is split node")
    }

    partInfo := &data.PartInfoExtended{
        Timestamp: node.GetTimestamp()[0],
    }

    var err error
    for _, kv := range node.GetMeta() {
        value := string(kv.GetValue())
        switch kv.GetKey() {
        case partNumberKV:
            if partInfo.Number, err = strconv.Atoi(value); err != nil {
                return nil, fmt.Errorf("invalid part number: %w", err)
            }
        case oidKV:
            if err = partInfo.OID.DecodeString(value); err != nil {
                return nil, fmt.Errorf("invalid oid: %w", err)
            }
        case etagKV:
            partInfo.ETag = value
        case sizeKV:
            if partInfo.Size, err = strconv.ParseUint(value, 10, 64); err != nil {
                return nil, fmt.Errorf("invalid part size: %w", err)
            }
        case createdKV:
            var utcMilli int64
            if utcMilli, err = strconv.ParseInt(value, 10, 64); err != nil {
                return nil, fmt.Errorf("invalid created timestamp: %w", err)
            }
            partInfo.Created = time.UnixMilli(utcMilli)
        case md5KV:
            partInfo.MD5 = value
        }
    }

    if partInfo.Number <= 0 {
        return nil, fmt.Errorf("it's not a part node")
    }

    return partInfo, nil
}
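
// GetSettingsNode reads the bucket settings node from the system tree and parses
// versioning, lock configuration, canned ACL and owner key.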
func (c *Tree) GetSettingsNode(ctx context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
    multiNode, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
    if err != nil {
        return nil, fmt.Errorf("couldn't get node: %w", err)
    }

    node := multiNode.Latest()

    settings := &data.BucketSettings{Versioning: data.VersioningUnversioned}
    if versioningValue, ok := node.Get(versioningKV); ok {
        settings.Versioning = versioningValue
    }

    if lockConfigurationValue, ok := node.Get(lockConfigurationKV); ok {
        if settings.LockConfiguration, err = parseLockConfiguration(lockConfigurationValue); err != nil {
            return nil, fmt.Errorf("settings node: invalid lock configuration: %w", err)
        }
    }

    settings.CannedACL, _ = node.Get(cannedACLKV)

    if ownerKeyHex, ok := node.Get(ownerKeyKV); ok {
        if settings.OwnerKey, err = keys.NewPublicKeyFromString(ownerKeyHex); err != nil {
            c.reqLogger(ctx).Error(logs.SettingsNodeInvalidOwnerKey, zap.Error(err))
        }
    }

    return settings, nil
}
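
// PutSettingsNode stores the bucket settings in the system tree: it creates the node
// if it does not exist and moves the latest node otherwise, cleaning up old nodes.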
func (c *Tree) PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) error {
    multiNode, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
    isErrNotFound := errors.Is(err, tree.ErrNodeNotFound)
    if err != nil && !isErrNotFound {
        return fmt.Errorf("couldn't get node: %w", err)
    }

    meta := metaFromSettings(settings)

    if isErrNotFound {
        _, err = c.service.AddNode(ctx, bktInfo, systemTree, 0, meta)
        return err
    }

    latest := multiNode.Latest()
    ind := latest.GetLatestNodeIndex()
    if latest.IsSplit() {
        c.reqLogger(ctx).Error(logs.BucketSettingsNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID))
    }

    if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil {
        return fmt.Errorf("move settings node: %w", err)
    }

    c.cleanOldNodes(ctx, multiNode.Old(), bktInfo)

    return nil
}
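
// GetBucketCORS returns the address of the object with the bucket CORS configuration.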
func (c *Tree) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
    node, err := c.getSystemNode(ctx, bktInfo, corsFilename)
    if err != nil {
        return oid.Address{}, err
    }

    return getTreeNodeAddress(node.Latest())
}
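
// PutBucketCORS updates the bucket CORS node with the new object address and returns
// the addresses of CORS objects that can be removed.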
func (c *Tree) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error) {
    multiNode, err := c.getSystemNode(ctx, bktInfo, corsFilename)
    isErrNotFound := errors.Is(err, tree.ErrNodeNotFound)
    if err != nil && !isErrNotFound {
        return nil, fmt.Errorf("couldn't get node: %w", err)
    }

    meta := make(map[string]string)
    meta[FileNameKey] = corsFilename
    meta[oidKV] = addr.Object().EncodeToString()
    meta[cidKV] = addr.Container().EncodeToString()

    if isErrNotFound {
        if _, err = c.service.AddNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
            return nil, err
        }
        return nil, tree.ErrNoNodeToRemove
    }

    latest := multiNode.Latest()
    ind := latest.GetLatestNodeIndex()
    if latest.IsSplit() {
        c.reqLogger(ctx).Error(logs.BucketCORSNodeHasMultipleIDs)
    }

    if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil {
        return nil, fmt.Errorf("move cors node: %w", err)
    }

    objToDelete := make([]oid.Address, 1, len(multiNode.nodes))
    objToDelete[0], err = getTreeNodeAddress(latest)
    if err != nil {
        return nil, fmt.Errorf("parse object addr of latest cors node in tree: %w", err)
    }

    objToDelete = append(objToDelete, c.cleanOldNodes(ctx, multiNode.Old(), bktInfo)...)

    return objToDelete, nil
}
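
// DeleteBucketCORS removes all bucket CORS nodes from the system tree and returns
// the addresses of the corresponding objects.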
func (c *Tree) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error) {
    multiNode, err := c.getSystemNode(ctx, bktInfo, corsFilename)
    isErrNotFound := errors.Is(err, tree.ErrNodeNotFound)
    if err != nil && !isErrNotFound {
        return nil, err
    }

    if isErrNotFound {
        return nil, tree.ErrNoNodeToRemove
    }

    objToDelete := c.cleanOldNodes(ctx, multiNode.nodes, bktInfo)
    if len(objToDelete) != len(multiNode.nodes) {
        return nil, fmt.Errorf("failed to clean all old cors nodes")
    }

    return objToDelete, nil
}

func getTreeNodeAddress(node *treeNode) (oid.Address, error) {
    var addr oid.Address
    addr.SetObject(node.ObjID)

    if cidStr, ok := node.Get(cidKV); ok {
        var cnrID cid.ID
        if err := cnrID.DecodeString(cidStr); err != nil {
            return oid.Address{}, fmt.Errorf("couldn't decode cid: %w", err)
        }
        addr.SetContainer(cnrID)
    }

    return addr, nil
}

func (c *Tree) cleanOldNodes(ctx context.Context, nodes []*treeNode, bktInfo *data.BucketInfo) []oid.Address {
    res := make([]oid.Address, 0, len(nodes))

    for _, node := range nodes {
        ind := node.GetLatestNodeIndex()
        if node.IsSplit() {
            c.reqLogger(ctx).Error(logs.SystemNodeHasMultipleIDs, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64s("ids", node.ID))
        }
        if err := c.service.RemoveNode(ctx, bktInfo, systemTree, node.ID[ind]); err != nil {
            c.reqLogger(ctx).Warn(logs.FailedToRemoveOldSystemNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]))
        } else {
            addr, err := getTreeNodeAddress(node)
            if err != nil {
                c.log.Warn(logs.FailedToParseAddressInTreeNode, zap.String("FileName", node.Meta[FileNameKey]), zap.Uint64("id", node.ID[ind]))
                continue
            }
            res = append(res, addr)
        }
    }

    return res
}
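
// GetObjectTagging returns the user-defined tags of the given object version.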
func (c *Tree) GetObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, error) {
    tagNode, err := c.getTreeNode(ctx, bktInfo, objVersion.ID, isTagKV)
    if err != nil {
        return nil, err
    }

    return getObjectTagging(tagNode), nil
}

func getObjectTagging(tagNode *treeNode) map[string]string {
    if tagNode == nil {
        return nil
    }

    meta := make(map[string]string)

    for key, val := range tagNode.Meta {
        if strings.HasPrefix(key, userDefinedTagPrefix) {
            meta[strings.TrimPrefix(key, userDefinedTagPrefix)] = val
        }
    }

    return meta
}
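
// PutObjectTagging replaces the tag set of the given object version.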
func (c *Tree) PutObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion, tagSet map[string]string) error {
    tagNode, err := c.getTreeNode(ctx, bktInfo, objVersion.ID, isTagKV)
    if err != nil {
        return err
    }

    treeTagSet := make(map[string]string)
    treeTagSet[isTagKV] = "true"

    for key, val := range tagSet {
        treeTagSet[userDefinedTagPrefix+key] = val
    }

    if tagNode == nil {
        _, err = c.service.AddNode(ctx, bktInfo, versionTree, objVersion.ID, treeTagSet)
        return err
    }

    ind := tagNode.GetLatestNodeIndex()
    if tagNode.IsSplit() {
        c.reqLogger(ctx).Error(logs.ObjectTaggingNodeHasMultipleIDs)
    }

    return c.service.MoveNode(ctx, bktInfo, versionTree, tagNode.ID[ind], objVersion.ID, treeTagSet)
}

func (c *Tree) DeleteObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) error {
    return c.PutObjectTagging(ctx, bktInfo, objVersion, nil)
}
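
// GetBucketTagging returns the user-defined tags of the bucket.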
func (c *Tree) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
    multiNode, err := c.getSystemNode(ctx, bktInfo, bucketTaggingFilename)
    if err != nil {
        return nil, err
    }

    tags := make(map[string]string)

    for key, val := range multiNode.Latest().Meta {
        if strings.HasPrefix(key, userDefinedTagPrefix) {
            tags[strings.TrimPrefix(key, userDefinedTagPrefix)] = val
        }
    }

    return tags, nil
}
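
// PutBucketTagging replaces the bucket tag set stored in the system tree.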
func (c *Tree) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
    multiNode, err := c.getSystemNode(ctx, bktInfo, bucketTaggingFilename)
    isErrNotFound := errors.Is(err, tree.ErrNodeNotFound)
    if err != nil && !isErrNotFound {
        return fmt.Errorf("couldn't get node: %w", err)
    }

    treeTagSet := make(map[string]string)
    treeTagSet[FileNameKey] = bucketTaggingFilename

    for key, val := range tagSet {
        treeTagSet[userDefinedTagPrefix+key] = val
    }

    if isErrNotFound {
        _, err = c.service.AddNode(ctx, bktInfo, systemTree, 0, treeTagSet)
        return err
    }

    latest := multiNode.Latest()
    ind := latest.GetLatestNodeIndex()
    if latest.IsSplit() {
        c.reqLogger(ctx).Error(logs.BucketTaggingNodeHasMultipleIDs, zap.Uint64s("ids", latest.ID))
    }

    if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, treeTagSet); err != nil {
        return fmt.Errorf("move bucket tagging node: %w", err)
    }

    c.cleanOldNodes(ctx, multiNode.Old(), bktInfo)

    return nil
}

func (c *Tree) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
    return c.PutBucketTagging(ctx, bktInfo, nil)
}

func (c *Tree) getTreeNode(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, key string) (*treeNode, error) {
    nodes, err := c.getTreeNodes(ctx, bktInfo, nodeID, key)
    if err != nil {
        return nil, err
    }
    // if there will be many allocations, consider having separate
    // implementations of 'getTreeNode' and 'getTreeNodes'
    return nodes[key], nil
}

func (c *Tree) getTreeNodes(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, keys ...string) (map[string]*treeNode, error) {
    subtree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, []uint64{nodeID}, 2, false)
    if err != nil {
        return nil, err
    }

    // consider using map[string][]*treeNode
    // to be able to remove unused node, that can be added during split
    treeNodes := make(map[string]*treeNode, len(keys))

    for _, s := range subtree {
        node, err := newTreeNode(s)
        if err != nil {
            return nil, err
        }
        for _, key := range keys {
            if _, ok := node.Get(key); ok {
                treeNodes[key] = node
                break
            }
        }
        if len(treeNodes) == len(keys) {
            break
        }
    }

    return treeNodes, nil
}
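
// GetVersions returns all versions of the object with the given file path.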
func (c *Tree) GetVersions(ctx context.Context, bktInfo *data.BucketInfo, filepath string) ([]*data.NodeVersion, error) {
    return c.getVersions(ctx, bktInfo, versionTree, filepath, false)
}
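
// GetLatestVersion returns the newest version of the object with the given name.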
func (c *Tree) GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
    meta := []string{oidKV, isCombinedKV, isUnversionedKV, isDeleteMarkerKV, etagKV, sizeKV, md5KV, creationEpochKV}
    path := pathFromName(objectName)

    p := &GetNodesParams{
        BktInfo:    bktInfo,
        TreeID:     versionTree,
        Path:       path,
        Meta:       meta,
        LatestOnly: false,
        AllAttrs:   false,
    }
    nodes, err := c.service.GetNodes(ctx, p)
    if err != nil {
        return nil, err
    }

    latestNode, err := getLatestVersionNode(nodes)
    if err != nil {
        return nil, err
    }

    return newNodeVersion(c.reqLogger(ctx), objectName, latestNode)
}

func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
    var (
        maxCreationTime uint64
        targetIndexNode = -1
    )

    for i, node := range nodes {
        if !checkExistOID(node.GetMeta()) {
            continue
        }

        if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime {
            targetIndexNode = i
            maxCreationTime = currentCreationTime
        }
    }

    if targetIndexNode == -1 {
        return nil, tree.ErrNodeNotFound
    }

    return nodes[targetIndexNode], nil
}

func getMaxTimestamp(node NodeResponse) uint64 {
    var maxTimestamp uint64

    for _, timestamp := range node.GetTimestamp() {
        if timestamp > maxTimestamp {
            maxTimestamp = timestamp
        }
    }

    return maxTimestamp
}

func checkExistOID(meta []Meta) bool {
    for _, kv := range meta {
        if kv.GetKey() == "OID" {
            return true
        }
    }

    return false
}

// pathFromName splits name by '/'.
func pathFromName(objectName string) []string {
    return strings.Split(objectName, separator)
}
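
// DummySubTreeStream implements SubTreeStream over a single node.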
type DummySubTreeStream struct {
    data NodeResponse
    read bool
}

func (s *DummySubTreeStream) Next() (NodeResponse, error) {
    if s.read {
        return nil, io.EOF
    }

    s.read = true
    return s.data, nil
}
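
// MultiID is a list of node IDs of a (possibly split) tree node.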
type MultiID []uint64

func (m MultiID) Equal(id MultiID) bool {
    seen := make(map[uint64]struct{}, len(m))

    for i := range m {
        seen[m[i]] = struct{}{}
    }

    for i := range id {
        if _, ok := seen[id[i]]; !ok {
            return false
        }
    }

    return true
}
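
// VersionsByPrefixStreamImpl streams object versions under a prefix from the version tree.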
type VersionsByPrefixStreamImpl struct {
    ctx                context.Context
    rootID             MultiID
    intermediateRootID MultiID
    service            ServiceClient
    bktInfo            *data.BucketInfo
    mainStream         SubTreeStream
    innerStream        SubTreeStream
    headPrefix         string
    tailPrefix         string
    namesMap           map[uint64]string
    ended              bool
    latestOnly         bool
    currentLatest      *data.NodeVersion
    log                *zap.Logger
}

func (s *VersionsByPrefixStreamImpl) Next(context.Context) (*data.NodeVersion, error) {
    if s.ended {
        return nil, io.EOF
    }

    for {
        if s.innerStream == nil {
            node, err := s.getNodeFromMainStream()
            if err != nil {
                if errors.Is(err, io.EOF) {
                    s.ended = true
                    if s.currentLatest != nil {
                        return s.currentLatest, nil
                    }
                }
                return nil, fmt.Errorf("get node from main stream: %w", err)
            }

            if err = s.initInnerStream(node); err != nil {
                return nil, fmt.Errorf("init inner stream: %w", err)
            }
        }

        nodeVersion, err := s.getNodeVersionFromInnerStream()
        if err != nil {
            if errors.Is(err, io.EOF) {
                s.innerStream = nil
                maps.Clear(s.namesMap)
                if s.currentLatest != nil && !s.intermediateRootID.Equal([]uint64{s.currentLatest.ID}) {
                    return s.currentLatest, nil
                }
                continue
            }
            return nil, fmt.Errorf("inner stream: %w", err)
        }
        return nodeVersion, nil
    }
}

func (s *VersionsByPrefixStreamImpl) getNodeFromMainStream() (NodeResponse, error) {
    for {
        node, err := s.mainStream.Next()
        if err != nil {
            if errors.Is(err, ErrNodeNotFound) {
                return nil, io.EOF
            }
            return nil, fmt.Errorf("main stream next: %w", err)
        }

        if !s.rootID.Equal(node.GetNodeID()) && strings.HasPrefix(getFilename(node), s.tailPrefix) {
            return node, nil
        }
    }
}

func (s *VersionsByPrefixStreamImpl) initInnerStream(node NodeResponse) (err error) {
    if s.rootID.Equal(node.GetParentID()) {
        s.intermediateRootID = node.GetNodeID()
    }

    if isIntermediate(node) {
        s.innerStream, err = s.service.GetSubTreeStream(s.ctx, s.bktInfo, versionTree, node.GetNodeID(), maxGetSubTreeDepth)
        if err != nil {
            return fmt.Errorf("get sub tree node from main stream: %w", err)
        }
    } else {
        s.innerStream = &DummySubTreeStream{data: node}
    }

    return nil
}

func (s *VersionsByPrefixStreamImpl) getNodeVersionFromInnerStream() (*data.NodeVersion, error) {
    for {
        node, err := s.innerStream.Next()
        if err != nil {
            return nil, fmt.Errorf("inner stream: %w", err)
        }

        nodeVersion, skip, err := s.parseNodeResponse(node)
        if err != nil {
            return nil, err
        }
        if skip {
            continue
        }

        if s.latestOnly {
            if s.currentLatest == nil {
                s.currentLatest = nodeVersion
                continue
            }

            if s.currentLatest.FilePath != nodeVersion.FilePath {
                res := s.currentLatest
                s.currentLatest = nodeVersion
                return res, nil
            }

            if s.currentLatest.Timestamp < nodeVersion.Timestamp {
                s.currentLatest = nodeVersion
            }

            continue
        }

        return nodeVersion, nil
    }
}

func (s *VersionsByPrefixStreamImpl) parseNodeResponse(node NodeResponse) (res *data.NodeVersion, skip bool, err error) {
    trNode, fileName, err := parseTreeNode(node)
    if err != nil {
        if !errors.Is(err, errNodeDoesntContainFileName) {
            s.log.Debug(logs.ParseTreeNode, zap.Error(err))
        }
        return nil, true, nil
    }

    var parentPrefix string
    if s.headPrefix != "" { // The root of subTree can also have a parent
        parentPrefix = strings.TrimSuffix(s.headPrefix, separator) + separator // To avoid 'foo//bar'
    }

    var filepath string
    if !s.intermediateRootID.Equal(trNode.ID) {
        if filepath, err = formFilePath(node, fileName, s.namesMap); err != nil {
            return nil, false, fmt.Errorf("invalid node order: %w", err)
        }
    } else {
        filepath = parentPrefix + fileName
        for _, id := range trNode.ID {
            s.namesMap[id] = filepath
        }
    }

    if trNode.ObjID.Equals(oid.ID{}) { // The node can be intermediate, but we still want to update namesMap
        return nil, true, nil
    }

    nodeVersion, err := newNodeVersionFromTreeNode(s.log, filepath, trNode)
    return nodeVersion, false, err
}
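
// InitVersionsByPrefixStream initializes a stream of object versions whose file paths
// start with the given prefix; if latestOnly is set, only the newest version per path is returned.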
func (c *Tree) InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
    mainStream, tailPrefix, rootID, err := c.getSubTreeByPrefixMainStream(ctx, bktInfo, versionTree, prefix)
    if err != nil {
        if errors.Is(err, io.EOF) {
            return &VersionsByPrefixStreamImpl{ended: true}, nil
        }
        return nil, err
    }

    return &VersionsByPrefixStreamImpl{
        ctx:        ctx,
        namesMap:   map[uint64]string{},
        rootID:     rootID,
        service:    c.service,
        bktInfo:    bktInfo,
        mainStream: mainStream,
        headPrefix: strings.TrimSuffix(prefix, tailPrefix),
        tailPrefix: tailPrefix,
        latestOnly: latestOnly,
        log:        c.reqLogger(ctx),
    }, nil
}

func (c *Tree) getSubTreeByPrefixMainStream(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) (SubTreeStream, string, []uint64, error) {
    rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, treeID, prefix)
    if err != nil {
        if errors.Is(err, tree.ErrNodeNotFound) {
            return nil, "", nil, io.EOF
        }
        return nil, "", nil, err
    }

    subTree, err := c.service.GetSubTreeStream(ctx, bktInfo, treeID, rootID, 2)
    if err != nil {
        if errors.Is(err, tree.ErrNodeNotFound) {
            return nil, "", nil, io.EOF
        }
        return nil, "", nil, err
    }

    return subTree, tailPrefix, rootID, nil
}

func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
    rootID := []uint64{0}
    path := strings.Split(prefix, separator)
    tailPrefix := path[len(path)-1]

    if len(path) > 1 {
        var err error
        rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
        if err != nil {
            return nil, "", err
        }
    }

    return rootID, tailPrefix, nil
}

func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
    p := &GetNodesParams{
        BktInfo:    bktInfo,
        TreeID:     treeID,
        Path:       prefixPath,
        LatestOnly: false,
        AllAttrs:   true,
    }
    nodes, err := c.service.GetNodes(ctx, p)
    if err != nil {
        return nil, err
    }

    var intermediateNodes []uint64
    for _, node := range nodes {
        if isIntermediate(node) {
            intermediateNodes = append(intermediateNodes, node.GetNodeID()...)
        }
    }

    if len(intermediateNodes) == 0 {
        return nil, tree.ErrNodeNotFound
    }

    return intermediateNodes, nil
}

func (c *Tree) getSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
    rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, treeID, prefix)
    if err != nil {
        if errors.Is(err, tree.ErrNodeNotFound) {
            return nil, "", nil
        }
        return nil, "", err
    }

    subTree, err := c.service.GetSubTree(ctx, bktInfo, treeID, rootID, 2, false)
    if err != nil {
        if errors.Is(err, tree.ErrNodeNotFound) {
            return nil, "", nil
        }
        return nil, "", err
    }

    nodesMap := make(map[string][]NodeResponse, len(subTree))
    for _, node := range subTree {
        if MultiID(rootID).Equal(node.GetNodeID()) {
            continue
        }

        fileName := getFilename(node)
        if !strings.HasPrefix(fileName, tailPrefix) {
            continue
        }

        nodes := nodesMap[fileName]

        // Add all nodes if flag latestOnly is false.
        // Add all intermediate nodes
        // and only latest leaf (object) nodes. To do this store and replace last leaf (object) node in nodes[0]
        if len(nodes) == 0 {
            nodes = []NodeResponse{node}
        } else if !latestOnly || isIntermediate(node) {
            nodes = append(nodes, node)
        } else if isIntermediate(nodes[0]) {
            nodes = append([]NodeResponse{node}, nodes...)
        } else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
            nodes[0] = node
        }

        nodesMap[fileName] = nodes
    }

    result := make([]NodeResponse, 0, len(subTree))
    for _, nodes := range nodesMap {
        result = append(result, nodes...)
    }

    return result, strings.TrimSuffix(prefix, tailPrefix), nil
}
|
|
|
|
|
2023-03-14 14:31:15 +00:00
|
|
|
func getFilename(node NodeResponse) string {
|
2022-05-20 15:02:00 +00:00
|
|
|
for _, kv := range node.GetMeta() {
|
2023-03-14 14:31:15 +00:00
|
|
|
if kv.GetKey() == FileNameKey {
|
2022-07-21 08:18:41 +00:00
|
|
|
return string(kv.GetValue())
|
2022-05-20 15:02:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-21 08:18:41 +00:00
|
|
|
return ""
|
2022-05-20 15:02:00 +00:00
|
|
|
}
|
|
|
|
|
2022-07-26 15:36:05 +00:00
|
|
|
func isIntermediate(node NodeResponse) bool {
|
2022-05-24 08:41:10 +00:00
|
|
|
if len(node.GetMeta()) != 1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-03-14 14:31:15 +00:00
|
|
|
return node.GetMeta()[0].GetKey() == FileNameKey
|
2022-05-24 08:41:10 +00:00
|
|
|
}
|
|
|
|
|
2023-03-14 14:31:15 +00:00
|
|
|
func formFilePath(node NodeResponse, fileName string, namesMap map[uint64]string) (string, error) {
|
2024-07-12 12:27:00 +00:00
|
|
|
var filepath string
|
|
|
|
|
|
|
|
for i, id := range node.GetParentID() {
|
|
|
|
parentPath, ok := namesMap[id]
|
|
|
|
if !ok {
|
|
|
|
return "", fmt.Errorf("couldn't get parent path")
|
|
|
|
}
|
2022-06-10 11:57:41 +00:00
|
|
|
|
2024-07-12 12:27:00 +00:00
|
|
|
filepath = parentPath + separator + fileName
|
|
|
|
namesMap[node.GetNodeID()[i]] = filepath
|
|
|
|
}
|
2022-06-10 11:57:41 +00:00
|
|
|
|
|
|
|
return filepath, nil
|
|
|
|
}

func parseTreeNode(node NodeResponse) (*treeNode, string, error) {
	tNode, err := newTreeNode(node)
	if err != nil { // invalid OID attribute
		return nil, "", err
	}

	fileName, ok := tNode.FileName()
	if !ok {
		return nil, "", errNodeDoesntContainFileName
	}

	return tNode, fileName, nil
}

func formLatestNodeKey(parentID uint64, fileName string) string {
	return strconv.FormatUint(parentID, 10) + "." + fileName
}
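
// GetUnversioned returns the latest unversioned node of the version tree for the given file path.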
func (c *Tree) GetUnversioned(ctx context.Context, bktInfo *data.BucketInfo, filepath string) (*data.NodeVersion, error) {
	return c.getUnversioned(ctx, bktInfo, versionTree, filepath)
}

func (c *Tree) getUnversioned(ctx context.Context, bktInfo *data.BucketInfo, treeID, filepath string) (*data.NodeVersion, error) {
	nodes, err := c.getVersions(ctx, bktInfo, treeID, filepath, true)
	if err != nil {
		return nil, err
	}

	if len(nodes) == 0 {
		return nil, tree.ErrNodeNotFound
	}

	if len(nodes) > 1 {
		c.reqLogger(ctx).Debug(logs.FoundMoreThanOneUnversionedNode,
			zap.String("treeID", treeID), zap.String("filepath", filepath))
	}

	sort.Slice(nodes, func(i, j int) bool {
		return nodes[i].Timestamp > nodes[j].Timestamp
	})

	return nodes[0], nil
}
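
// AddVersion puts a new object version into the version tree and returns the ID of the created node.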
func (c *Tree) AddVersion(ctx context.Context, bktInfo *data.BucketInfo, version *data.NodeVersion) (uint64, error) {
	return c.addVersion(ctx, bktInfo, versionTree, version)
}

func (c *Tree) RemoveVersion(ctx context.Context, bktInfo *data.BucketInfo, id uint64) error {
	return c.service.RemoveNode(ctx, bktInfo, versionTree, id)
}

func (c *Tree) CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error {
	path := pathFromName(info.Key)
	meta := metaFromMultipart(info, path[len(path)-1])
	_, err := c.service.AddNodeByPath(ctx, bktInfo, systemTree, path[:len(path)-1], meta)

	return err
}
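
// GetMultipartUploadsByPrefix returns all unfinished multipart uploads whose keys match the given prefix.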
func (c *Tree) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error) {
	subTreeNodes, headPrefix, err := c.getSubTreeByPrefix(ctx, bktInfo, systemTree, prefix, false)
	if err != nil {
		return nil, err
	}

	var result []*data.MultipartInfo
	for _, node := range subTreeNodes {
		multipartUploads, err := c.getSubTreeMultipartUploads(ctx, bktInfo, node.GetNodeID(), headPrefix)
		if err != nil {
			return nil, err
		}
		result = append(result, multipartUploads...)
	}

	return result, nil
}
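
// getSubTreeMultipartUploads collects multipart upload infos from the subtree rooted at nodeID,
// skipping finished uploads and nodes that cannot be parsed.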
func (c *Tree) getSubTreeMultipartUploads(ctx context.Context, bktInfo *data.BucketInfo, nodeID []uint64, parentFilePath string) ([]*data.MultipartInfo, error) {
	// Sorting in GetSubTree leads to skipping nodes that don't have the FileName attribute,
	// so when we are only interested in multipart nodes, we can set this flag
	// (even though multiparts are sorted in the layer above anyway)
	// to skip their children (parts) that don't have FileName.
	subTree, err := c.service.GetSubTree(ctx, bktInfo, systemTree, nodeID, maxGetSubTreeDepth, true)
	if err != nil {
		return nil, err
	}

	var parentPrefix string
	if parentFilePath != "" { // The root of subTree can also have a parent
		parentPrefix = strings.TrimSuffix(parentFilePath, separator) + separator // To avoid 'foo//bar'
	}

	var filepath string
	namesMap := make(map[uint64]string, len(subTree))
	multiparts := make(map[string][]*data.MultipartInfo, len(subTree))

	for i, node := range subTree {
		tNode, fileName, err := parseTreeNode(node)
		if err != nil {
			continue
		}

		if i != 0 {
			if filepath, err = formFilePath(node, fileName, namesMap); err != nil {
				return nil, fmt.Errorf("invalid node order: %w", err)
			}
		} else {
			filepath = parentPrefix + fileName
			for _, id := range tNode.ID {
				namesMap[id] = filepath
			}
		}

		multipartInfo, err := newMultipartInfoFromTreeNode(c.reqLogger(ctx), filepath, tNode)
		if err != nil || multipartInfo.Finished {
			continue
		}

		for _, id := range node.GetParentID() {
			key := formLatestNodeKey(id, fileName)
			multipartInfos, ok := multiparts[key]
			if !ok {
				multipartInfos = []*data.MultipartInfo{multipartInfo}
			} else {
				multipartInfos = append(multipartInfos, multipartInfo)
			}

			multiparts[key] = multipartInfos
		}
	}

	result := make([]*data.MultipartInfo, 0, len(multiparts))
	for _, multipartInfo := range multiparts {
		result = append(result, multipartInfo...)
	}

	return result, nil
}

func (c *Tree) GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error) {
	path := pathFromName(objectName)
	p := &GetNodesParams{
		BktInfo:  bktInfo,
		TreeID:   systemTree,
		Path:     path,
		AllAttrs: true,
	}

	nodes, err := c.service.GetNodes(ctx, p)
	if err != nil {
		return nil, err
	}

	log := c.reqLogger(ctx)
	for _, node := range nodes {
		info, err := newMultipartInfo(log, node)
		if err != nil {
			continue
		}
		if info.UploadID == uploadID {
			if info.Finished {
				break
			}
			return info, nil
		}
	}

	return nil, tree.ErrNodeNotFound
}
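
// AddPart adds a part node under the given multipart node. If nodes for the same part number
// already exist, the latest of them is reused for the new metadata and the object IDs of the
// superseded parts are returned so that the caller can remove them from the storage. If there
// are no such parts, a new node is added and tree.ErrNoNodeToRemove is returned.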
func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) {
	parts, err := c.service.GetSubTree(ctx, bktInfo, systemTree, []uint64{multipartNodeID}, 2, false)
	if err != nil {
		return nil, err
	}

	meta := map[string]string{
		partNumberKV: strconv.Itoa(info.Number),
		oidKV:        info.OID.EncodeToString(),
		sizeKV:       strconv.FormatUint(info.Size, 10),
		createdKV:    strconv.FormatInt(info.Created.UTC().UnixMilli(), 10),
		etagKV:       info.ETag,
		md5KV:        info.MD5,
	}

	objToDelete := make([]oid.ID, 0, 1)
	partsToDelete := make([]uint64, 0, 1)
	var (
		latestPartID uint64
		maxTimestamp uint64
	)

	multiNodeID := MultiID{multipartNodeID}

	for _, part := range parts {
		if multiNodeID.Equal(part.GetNodeID()) {
			continue
		}

		partInfo, err := newPartInfo(part)
		if err != nil {
			c.reqLogger(ctx).Warn(logs.FailedToParsePartInfo,
				zap.String("key", info.Key),
				zap.String("upload id", info.UploadID),
				zap.Uint64("multipart node id", multipartNodeID),
				zap.Uint64s("id", part.GetNodeID()),
				zap.Error(err))
			continue
		}
		if partInfo.Number == info.Number {
			nodeID := part.GetNodeID()[0]
			objToDelete = append(objToDelete, partInfo.OID)
			partsToDelete = append(partsToDelete, nodeID)
			timestamp := partInfo.Timestamp
			if timestamp > maxTimestamp {
				maxTimestamp = timestamp
				latestPartID = nodeID
			}
		}
	}

	if len(objToDelete) != 0 {
		if err = c.service.MoveNode(ctx, bktInfo, systemTree, latestPartID, multipartNodeID, meta); err != nil {
			return nil, fmt.Errorf("move part node: %w", err)
		}

		for _, nodeID := range partsToDelete {
			if nodeID == latestPartID {
				continue
			}
			if err = c.service.RemoveNode(ctx, bktInfo, systemTree, nodeID); err != nil {
				c.reqLogger(ctx).Warn(logs.FailedToRemoveOldPartNode,
					zap.String("key", info.Key),
					zap.String("upload id", info.UploadID),
					zap.Uint64("id", nodeID))
			}
		}

		return objToDelete, nil
	}

	if _, err = c.service.AddNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
		return nil, err
	}

	// The part was added as a new node, so there are no old part objects to delete.
	return nil, tree.ErrNoNodeToRemove
}
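
// GetParts returns information about all parts stored under the given multipart node.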
func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) {
	parts, err := c.service.GetSubTree(ctx, bktInfo, systemTree, []uint64{multipartNodeID}, 2, false)
	if err != nil {
		return nil, err
	}

	result := make([]*data.PartInfoExtended, 0, len(parts))
	for _, part := range parts {
		if len(part.GetNodeID()) != 1 {
			// A multipart part node shouldn't have multiple node IDs.
			c.reqLogger(ctx).Warn(logs.UnexpectedMultiNodeIDsInSubTreeMultiParts,
				zap.Uint64("multipart node id", multipartNodeID),
				zap.Uint64s("node ids", part.GetNodeID()))
			continue
		}
		if part.GetNodeID()[0] == multipartNodeID {
			continue
		}
		partInfo, err := newPartInfo(part)
		if err != nil {
			c.reqLogger(ctx).Warn(logs.FailedToParsePartInfo,
				zap.Uint64("multipart node id", multipartNodeID),
				zap.Uint64s("node ids", part.GetNodeID()),
				zap.Error(err))
			continue
		}
		result = append(result, partInfo)
	}

	return result, nil
}
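
// PutBucketLifecycleConfiguration stores the address of the bucket lifecycle configuration object
// in the system tree and returns the addresses of configuration objects that became obsolete.
// If there was no previous configuration, tree.ErrNoNodeToRemove is returned.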
func (c *Tree) PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error) {
	multiNode, err := c.getSystemNode(ctx, bktInfo, bucketLifecycleFilename)
	isErrNotFound := errors.Is(err, tree.ErrNodeNotFound)
	if err != nil && !isErrNotFound {
		return nil, fmt.Errorf("couldn't get node: %w", err)
	}

	meta := make(map[string]string)
	meta[FileNameKey] = bucketLifecycleFilename
	meta[oidKV] = addr.Object().EncodeToString()
	meta[cidKV] = addr.Container().EncodeToString()

	if isErrNotFound {
		if _, err = c.service.AddNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
			return nil, err
		}
		return nil, tree.ErrNoNodeToRemove
	}

	latest := multiNode.Latest()
	ind := latest.GetLatestNodeIndex()
	if latest.IsSplit() {
		c.reqLogger(ctx).Error(logs.BucketLifecycleNodeHasMultipleIDs)
	}

	if err = c.service.MoveNode(ctx, bktInfo, systemTree, latest.ID[ind], 0, meta); err != nil {
		return nil, fmt.Errorf("move lifecycle node: %w", err)
	}

	objToDelete := make([]oid.Address, 1, len(multiNode.nodes))
	objToDelete[0], err = getTreeNodeAddress(latest)
	if err != nil {
		return nil, fmt.Errorf("parse object addr of latest lifecycle node in tree: %w", err)
	}

	objToDelete = append(objToDelete, c.cleanOldNodes(ctx, multiNode.Old(), bktInfo)...)

	return objToDelete, nil
}

func (c *Tree) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
	node, err := c.getSystemNode(ctx, bktInfo, bucketLifecycleFilename)
	if err != nil {
		return oid.Address{}, fmt.Errorf("get lifecycle node: %w", err)
	}

	return getTreeNodeAddress(node.Latest())
}

func (c *Tree) DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error) {
	multiNode, err := c.getSystemNode(ctx, bktInfo, bucketLifecycleFilename)
	isErrNotFound := errors.Is(err, tree.ErrNodeNotFound)
	if err != nil && !isErrNotFound {
		return nil, err
	}

	if isErrNotFound {
		return nil, tree.ErrNoNodeToRemove
	}

	objToDelete := c.cleanOldNodes(ctx, multiNode.nodes, bktInfo)
	if len(objToDelete) != len(multiNode.nodes) {
		return nil, fmt.Errorf("failed to clean all old lifecycle nodes")
	}

	return objToDelete, nil
}
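
// DeleteMultipartUpload removes the multipart node and re-creates it with the Finished flag set,
// so listings that skip finished uploads no longer return it.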
func (c *Tree) DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
	err := c.service.RemoveNode(ctx, bktInfo, systemTree, multipartInfo.ID)
	if err != nil {
		return err
	}

	multipartInfo.Finished = true

	return c.CreateMultipartUpload(ctx, bktInfo, multipartInfo)
}
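
// PutLock stores lock settings (legal hold, retention, compliance) for the object version node.
// A new lock node is added when lock.ID() is zero; otherwise the existing lock node is updated.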
func (c *Tree) PutLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
	meta := map[string]string{isLockKV: "true"}

	if lock.IsLegalHoldSet() {
		meta[legalHoldOIDKV] = lock.LegalHold().EncodeToString()
	}
	if lock.IsRetentionSet() {
		meta[retentionOIDKV] = lock.Retention().EncodeToString()
		meta[untilDateKV] = lock.UntilDate()
		if lock.IsCompliance() {
			meta[isComplianceKV] = "true"
		}
	}

	if lock.ID() == 0 {
		_, err := c.service.AddNode(ctx, bktInfo, versionTree, nodeID, meta)
		return err
	}

	return c.service.MoveNode(ctx, bktInfo, versionTree, lock.ID(), nodeID, meta)
}

func (c *Tree) GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
	lockNode, err := c.getTreeNode(ctx, bktInfo, nodeID, isLockKV)
	if err != nil {
		return nil, err
	}

	return getLock(lockNode)
}

func getLock(lockNode *treeNode) (*data.LockInfo, error) {
	if lockNode == nil {
		return &data.LockInfo{}, nil
	}
	if lockNode.IsSplit() {
		return nil, errors.New("invalid lock node: this is a split node")
	}
	lockInfo := data.NewLockInfo(lockNode.ID[0])

	if legalHold, ok := lockNode.Get(legalHoldOIDKV); ok {
		var legalHoldOID oid.ID
		if err := legalHoldOID.DecodeString(legalHold); err != nil {
			return nil, fmt.Errorf("invalid legal hold object id: %w", err)
		}
		lockInfo.SetLegalHold(legalHoldOID)
	}

	if retention, ok := lockNode.Get(retentionOIDKV); ok {
		var retentionOID oid.ID
		if err := retentionOID.DecodeString(retention); err != nil {
			return nil, fmt.Errorf("invalid retention object id: %w", err)
		}
		_, isCompliance := lockNode.Get(isComplianceKV)
		untilDate, _ := lockNode.Get(untilDateKV)
		lockInfo.SetRetention(retentionOID, untilDate, isCompliance)
	}

	return lockInfo, nil
}

func (c *Tree) GetObjectTaggingAndLock(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
	nodes, err := c.getTreeNodes(ctx, bktInfo, objVersion.ID, isTagKV, isLockKV)
	if err != nil {
		return nil, nil, err
	}

	lockInfo, err := getLock(nodes[isLockKV])
	if err != nil {
		return nil, nil, err
	}

	return getObjectTagging(nodes[isTagKV]), lockInfo, nil
}
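
// addVersion adds a new version node for the object. For unversioned objects the existing
// unversioned node, if any, is reused and its outdated tagging info is removed.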
func (c *Tree) addVersion(ctx context.Context, bktInfo *data.BucketInfo, treeID string, version *data.NodeVersion) (uint64, error) {
	path := pathFromName(version.FilePath)
	meta := map[string]string{
		oidKV:           version.OID.EncodeToString(),
		FileNameKey:     path[len(path)-1],
		ownerKV:         version.Owner.EncodeToString(),
		createdKV:       strconv.FormatInt(version.Created.UTC().UnixMilli(), 10),
		creationEpochKV: strconv.FormatUint(version.CreationEpoch, 10),
	}

	if version.Size > 0 {
		meta[sizeKV] = strconv.FormatUint(version.Size, 10)
	}
	if len(version.ETag) > 0 {
		meta[etagKV] = version.ETag
	}
	if len(version.MD5) > 0 {
		meta[md5KV] = version.MD5
	}

	if version.IsDeleteMarker {
		meta[isDeleteMarkerKV] = "true"
	}

	if version.IsCombined {
		meta[isCombinedKV] = "true"
	}

	if version.IsUnversioned {
		meta[isUnversionedKV] = "true"

		node, err := c.getUnversioned(ctx, bktInfo, treeID, version.FilePath)
		if err == nil {
			if err = c.service.MoveNode(ctx, bktInfo, treeID, node.ID, node.ParenID, meta); err != nil {
				return 0, err
			}

			return node.ID, c.clearOutdatedVersionInfo(ctx, bktInfo, treeID, node.ID)
		}

		if !errors.Is(err, tree.ErrNodeNotFound) {
			return 0, err
		}
	}

	return c.service.AddNodeByPath(ctx, bktInfo, treeID, path[:len(path)-1], meta)
}

func (c *Tree) clearOutdatedVersionInfo(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
	taggingNode, err := c.getTreeNode(ctx, bktInfo, nodeID, isTagKV)
	if err != nil {
		return err
	}
	if taggingNode != nil {
		return c.service.RemoveNode(ctx, bktInfo, treeID, taggingNode.ID[0])
	}

	return nil
}
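
// getVersions returns all version nodes for the given file path; with onlyUnversioned set,
// only nodes marked as unversioned are returned.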
func (c *Tree) getVersions(ctx context.Context, bktInfo *data.BucketInfo, treeID, filepath string, onlyUnversioned bool) ([]*data.NodeVersion, error) {
	keysToReturn := []string{oidKV, isCombinedKV, isUnversionedKV, isDeleteMarkerKV, etagKV, sizeKV, md5KV, creationEpochKV}
	path := pathFromName(filepath)
	p := &GetNodesParams{
		BktInfo:    bktInfo,
		TreeID:     treeID,
		Path:       path,
		Meta:       keysToReturn,
		LatestOnly: false,
		AllAttrs:   false,
	}
	nodes, err := c.service.GetNodes(ctx, p)
	if err != nil {
		if errors.Is(err, tree.ErrNodeNotFound) {
			return nil, nil
		}
		return nil, err
	}

	log := c.reqLogger(ctx)
	result := make([]*data.NodeVersion, 0, len(nodes))
	for _, node := range nodes {
		nodeVersion, err := newNodeVersion(log, filepath, node)
		if err != nil {
			return nil, err
		}

		if onlyUnversioned && !nodeVersion.IsUnversioned {
			continue
		}

		result = append(result, nodeVersion)
	}

	return result, nil
}

func metaFromSettings(settings *data.BucketSettings) map[string]string {
	results := make(map[string]string, 3)

	results[FileNameKey] = settingsFileName
	results[versioningKV] = settings.Versioning
	results[lockConfigurationKV] = encodeLockConfiguration(settings.LockConfiguration)
	results[cannedACLKV] = settings.CannedACL
	if settings.OwnerKey != nil {
		results[ownerKeyKV] = hex.EncodeToString(settings.OwnerKey.Bytes())
	}

	return results
}

func metaFromMultipart(info *data.MultipartInfo, fileName string) map[string]string {
	info.Meta[FileNameKey] = fileName
	info.Meta[uploadIDKV] = info.UploadID
	info.Meta[ownerKV] = info.Owner.EncodeToString()
	info.Meta[createdKV] = strconv.FormatInt(info.Created.UTC().UnixMilli(), 10)
	if info.Finished {
		info.Meta[finishedKV] = strconv.FormatBool(info.Finished)
	}
	info.Meta[creationEpochKV] = strconv.FormatUint(info.CreationEpoch, 10)

	return info.Meta
}
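
// getSystemNode returns the system tree node with the given file name (e.g. bucket settings or
// lifecycle configuration), ignoring multipart upload nodes that share the same path.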
func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name string) (*multiSystemNode, error) {
	p := &GetNodesParams{
		BktInfo:    bktInfo,
		TreeID:     systemTree,
		Path:       []string{name},
		LatestOnly: false,
		AllAttrs:   true,
	}
	nodes, err := c.service.GetNodes(ctx, p)
	if err != nil {
		return nil, err
	}

	nodes = filterMultipartNodes(nodes)

	if len(nodes) == 0 {
		return nil, tree.ErrNodeNotFound
	}
	if len(nodes) != 1 {
		c.reqLogger(ctx).Warn(logs.FoundSeveralSystemNodes, zap.String("name", name))
	}

	return newMultiNode(nodes)
}

func filterMultipartNodes(nodes []NodeResponse) []NodeResponse {
	res := make([]NodeResponse, 0, len(nodes))

LOOP:
	for _, node := range nodes {
		for _, meta := range node.GetMeta() {
			if meta.GetKey() == uploadIDKV {
				continue LOOP
			}
		}

		res = append(res, node)
	}

	return res
}

func (c *Tree) reqLogger(ctx context.Context) *zap.Logger {
	reqLogger := middleware.GetReqLog(ctx)
	if reqLogger != nil {
		return reqLogger
	}
	return c.log
}
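
// parseLockConfiguration decodes the string produced by encodeLockConfiguration, which has the
// form "<ObjectLockEnabled>" or "<ObjectLockEnabled>,<Days>,<Mode>,<Years>", e.g. "Enabled,10,COMPLIANCE,0".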
func parseLockConfiguration(value string) (*data.ObjectLockConfiguration, error) {
	result := &data.ObjectLockConfiguration{}
	if len(value) == 0 {
		return result, nil
	}

	lockValues := strings.Split(value, ",")
	result.ObjectLockEnabled = lockValues[0]

	if len(lockValues) == 1 {
		return result, nil
	}

	if len(lockValues) != 4 {
		return nil, fmt.Errorf("invalid lock configuration: %s", value)
	}

	var err error
	var days, years int64

	if len(lockValues[1]) > 0 {
		if days, err = strconv.ParseInt(lockValues[1], 10, 64); err != nil {
			return nil, fmt.Errorf("invalid lock configuration: %s", value)
		}
	}

	if len(lockValues[3]) > 0 {
		if years, err = strconv.ParseInt(lockValues[3], 10, 64); err != nil {
			return nil, fmt.Errorf("invalid lock configuration: %s", value)
		}
	}

	result.Rule = &data.ObjectLockRule{
		DefaultRetention: &data.DefaultRetention{
			Days:  days,
			Mode:  lockValues[2],
			Years: years,
		},
	}

	return result, nil
}

func encodeLockConfiguration(conf *data.ObjectLockConfiguration) string {
	if conf == nil {
		return ""
	}

	if conf.Rule == nil || conf.Rule.DefaultRetention == nil {
		return conf.ObjectLockEnabled
	}

	defaults := conf.Rule.DefaultRetention
	return fmt.Sprintf("%s,%d,%s,%d", conf.ObjectLockEnabled, defaults.Days, defaults.Mode, defaults.Years)
}