package layer

import (
	"context"
	"errors"
	"fmt"
	"io"
	"sort"
	"strings"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"github.com/panjf2000/ants/v2"
	"go.uber.org/zap"
)

type (
	// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
	ListObjectsParamsCommon struct {
		BktInfo   *data.BucketInfo
		Delimiter string
		Encode    string
		MaxKeys   int
		Prefix    string
	}

	// ListObjectsParamsV1 contains params for ListObjectsV1.
	ListObjectsParamsV1 struct {
		ListObjectsParamsCommon
		Marker string
	}

	// ListObjectsParamsV2 contains params for ListObjectsV2.
	ListObjectsParamsV2 struct {
		ListObjectsParamsCommon
		ContinuationToken string
		StartAfter        string
		FetchOwner        bool
	}

	// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
	ListObjectsInfo struct {
		Prefixes    []string
		Objects     []*data.ExtendedNodeVersion
		IsTruncated bool
	}

	// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
	ListObjectsInfoV1 struct {
		ListObjectsInfo
		NextMarker string
	}

	// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
	ListObjectsInfoV2 struct {
		ListObjectsInfo
		NextContinuationToken string
	}

	// ListObjectVersionsInfo stores info and a list of object versions.
	ListObjectVersionsInfo struct {
		CommonPrefixes      []string
		IsTruncated         bool
		KeyMarker           string
		NextKeyMarker       string
		NextVersionIDMarker string
		Version             []*data.ExtendedNodeVersion
		DeleteMarker        []*data.ExtendedNodeVersion
		VersionIDMarker     string
	}

	commonVersionsListingParams struct {
		BktInfo   *data.BucketInfo
		Delimiter string
		Prefix    string
		MaxKeys   int
		Marker    string
		Bookmark  string
	}

	commonLatestVersionsListingParams struct {
		commonVersionsListingParams
		ListType ListType
	}
)

type ListType int

const (
	ListObjectsV1Type ListType = iota + 1
	ListObjectsV2Type ListType = iota + 1
)

// ListObjectsV1 returns objects in a bucket for requests of Version 1.
func (n *Layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
	var result ListObjectsInfoV1

	prm := commonLatestVersionsListingParams{
		commonVersionsListingParams: commonVersionsListingParams{
			BktInfo:   p.BktInfo,
			Delimiter: p.Delimiter,
			Prefix:    p.Prefix,
			MaxKeys:   p.MaxKeys,
			Marker:    p.Marker,
			Bookmark:  p.Marker,
		},
		ListType: ListObjectsV1Type,
	}

	objects, next, err := n.getLatestObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		result.IsTruncated = true
		result.NextMarker = objects[len(objects)-1].Name()
	}

	result.Prefixes, result.Objects = triageExtendedObjects(objects)

	return &result, nil
}

// ListObjectsV2 returns objects in a bucket for requests of Version 2.
func (n *Layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
	var result ListObjectsInfoV2

	prm := commonLatestVersionsListingParams{
		commonVersionsListingParams: commonVersionsListingParams{
			BktInfo:   p.BktInfo,
			Delimiter: p.Delimiter,
			Prefix:    p.Prefix,
			MaxKeys:   p.MaxKeys,
			Marker:    p.StartAfter,
			Bookmark:  p.ContinuationToken,
		},
		ListType: ListObjectsV2Type,
	}

	objects, next, err := n.getLatestObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		result.IsTruncated = true
		result.NextContinuationToken = next.NodeVersion.OID.EncodeToString()
	}

	result.Prefixes, result.Objects = triageExtendedObjects(objects)

	return &result, nil
}

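// ListObjectVersions returns object versions and delete markers in a bucket for ListObjectVersions requests.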
func (n *Layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
	prm := commonVersionsListingParams{
		BktInfo:   p.BktInfo,
		Delimiter: p.Delimiter,
		Prefix:    p.Prefix,
		MaxKeys:   p.MaxKeys,
		Marker:    p.KeyMarker,
		Bookmark:  p.VersionIDMarker,
	}

	objects, isTruncated, err := n.getAllObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	res := &ListObjectVersionsInfo{
		KeyMarker:       p.KeyMarker,
		VersionIDMarker: p.VersionIDMarker,
		IsTruncated:     isTruncated,
	}

	if res.IsTruncated {
		res.NextKeyMarker = objects[p.MaxKeys-1].NodeVersion.FilePath
		res.NextVersionIDMarker = objects[p.MaxKeys-1].NodeVersion.OID.EncodeToString()
	}

	res.CommonPrefixes, objects = triageExtendedObjects(objects)
	res.Version, res.DeleteMarker = triageVersions(objects)
	return res, nil
}

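// getLatestObjectsVersions returns up to p.MaxKeys latest object versions sorted by file path
// and, if the listing is truncated, the first element of the next page.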
func (n *Layer) getLatestObjectsVersions(ctx context.Context, p commonLatestVersionsListingParams) (objects []*data.ExtendedNodeVersion, next *data.ExtendedNodeVersion, err error) {
	if p.MaxKeys == 0 {
		return nil, nil, nil
	}

	session, err := n.getListLatestVersionsSession(ctx, p)
	if err != nil {
		return nil, nil, err
	}

	generator, errorCh := nodesGeneratorStream(ctx, p.commonVersionsListingParams, session)
	objOutCh, err := n.initWorkerPool(ctx, 2, p.commonVersionsListingParams, generator)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
	}

	objects = make([]*data.ExtendedNodeVersion, 0, p.MaxKeys+1)
	objects = append(objects, session.Next...)
	for obj := range objOutCh {
		objects = append(objects, obj)
	}

	if err = <-errorCh; err != nil {
		return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
	}

	sort.Slice(objects, func(i, j int) bool { return objects[i].NodeVersion.FilePath < objects[j].NodeVersion.FilePath })

	if len(objects) > p.MaxKeys {
		next = objects[p.MaxKeys]
		n.putListLatestVersionsSession(ctx, p, session, objects)
		objects = objects[:p.MaxKeys]
	}

	return
}

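// getAllObjectsVersions returns up to p.MaxKeys object versions sorted by file path
// and reports whether the listing is truncated.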
func (n *Layer) getAllObjectsVersions(ctx context.Context, p commonVersionsListingParams) ([]*data.ExtendedNodeVersion, bool, error) {
	if p.MaxKeys == 0 {
		return nil, false, nil
	}

	session, err := n.getListAllVersionsSession(ctx, p)
	if err != nil {
		return nil, false, err
	}

	generator, errorCh := nodesGeneratorVersions(ctx, p, session)
	objOutCh, err := n.initWorkerPool(ctx, 2, p, generator)
	if err != nil {
		return nil, false, err
	}

	allObjects := handleGeneratedVersions(objOutCh, p, session)

	sort.SliceStable(allObjects, func(i, j int) bool { return allObjects[i].NodeVersion.FilePath < allObjects[j].NodeVersion.FilePath })

	if err = <-errorCh; err != nil {
		return nil, false, fmt.Errorf("failed to get next object from stream: %w", err)
	}

	var isTruncated bool
	if len(allObjects) > p.MaxKeys {
		isTruncated = true
		n.putListAllVersionsSession(ctx, p, session, allObjects)
		allObjects = allObjects[:p.MaxKeys]
	}

	return allObjects, isTruncated, nil
}

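// handleGeneratedVersions collects versions from objOutCh, grouping consecutive versions of the
// same object (or directory) and marking the latest version in each group via formVersionsListRow.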
func handleGeneratedVersions(objOutCh <-chan *data.ExtendedNodeVersion, p commonVersionsListingParams, session *data.ListSession) []*data.ExtendedNodeVersion {
	var lastName string
	var listRowStartIndex int
	allObjects := make([]*data.ExtendedNodeVersion, 0, p.MaxKeys)
	for eoi := range objOutCh {
		name := eoi.NodeVersion.FilePath
		if eoi.DirName != "" {
			name = eoi.DirName
		}

		if lastName != name {
			formVersionsListRow(allObjects, listRowStartIndex, session)
			listRowStartIndex = len(allObjects)
			allObjects = append(allObjects, eoi)
		} else if eoi.DirName == "" {
			allObjects = append(allObjects, eoi)
		}
		lastName = name
	}

	formVersionsListRow(allObjects, listRowStartIndex, session)

	return allObjects
}

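// formVersionsListRow sorts the versions of one object (objects[rowStartIndex:]) from newest to
// oldest and keeps the IsLatest flag only on the newest one, unless that version continues a row
// started on the previous page.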
func formVersionsListRow(objects []*data.ExtendedNodeVersion, rowStartIndex int, session *data.ListSession) {
	if len(objects) == 0 {
		return
	}

	prevVersions := objects[rowStartIndex:]
	sort.Slice(prevVersions, func(i, j int) bool {
		return prevVersions[j].NodeVersion.Timestamp < prevVersions[i].NodeVersion.Timestamp // sort in reverse order to have last added first
	})

	prevVersions[0].IsLatest = len(session.Next) == 0 || session.Next[0].NodeVersion.FilePath != prevVersions[0].NodeVersion.FilePath

	for _, version := range prevVersions[1:] {
		version.IsLatest = false
	}
}

func (n *Layer) getListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams) (*data.ListSession, error) {
	return n.getListVersionsSession(ctx, p.commonVersionsListingParams, true)
}

func (n *Layer) getListAllVersionsSession(ctx context.Context, p commonVersionsListingParams) (*data.ListSession, error) {
	return n.getListVersionsSession(ctx, p, false)
}

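// getListVersionsSession returns a cached listing session for the given prefix and bookmark,
// or initializes a new one if no reusable session is found.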
func (n *Layer) getListVersionsSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (*data.ListSession, error) {
	owner := n.BearerOwner(ctx)

	cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, p.Bookmark)
	session := n.cache.GetListSession(owner, cacheKey)
	if session == nil {
		return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
	}

	if session.Acquired.Swap(true) {
		return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
	}

	// After reading the next object from the stream in the session,
	// the cached value no longer matches the next token in the cache key.
	n.cache.DeleteListSession(owner, cacheKey)

	return session, nil
}

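// initNewVersionsByPrefixSession creates a new listing session backed by a versions-by-prefix
// stream from the tree service.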
func (n *Layer) initNewVersionsByPrefixSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (session *data.ListSession, err error) {
	session = &data.ListSession{NamesMap: make(map[string]struct{})}
	session.Context, session.Cancel = context.WithCancel(context.Background())

	if bd, err := middleware.GetBoxData(ctx); err == nil {
		session.Context = middleware.SetBox(session.Context, &middleware.Box{AccessBox: bd})
	}

	session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, latestOnly)
	if err != nil {
		return nil, err
	}

	return session, nil
}

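// putListLatestVersionsSession saves the session to the cache when more than p.MaxKeys objects
// were produced, keeping the first element of the next page in session.Next.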
func (n *Layer) putListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
	if len(allObjects) <= p.MaxKeys {
		return
	}

	var cacheKey cache.ListSessionKey
	switch p.ListType {
	case ListObjectsV1Type:
		cacheKey = cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, allObjects[p.MaxKeys-1].Name())
	case ListObjectsV2Type:
		cacheKey = cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, allObjects[p.MaxKeys].NodeVersion.OID.EncodeToString())
	default:
		// should never happen
		panic("invalid list type")
	}

	session.Acquired.Store(false)
	session.Next = []*data.ExtendedNodeVersion{allObjects[p.MaxKeys]}
	n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
}

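// putListAllVersionsSession saves the session to the cache when more than p.MaxKeys versions
// were produced, keeping the last returned version and the remaining ones in session.Next.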
func (n *Layer) putListAllVersionsSession(ctx context.Context, p commonVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
	if len(allObjects) <= p.MaxKeys {
		return
	}

	session.Acquired.Store(false)

	session.Next = make([]*data.ExtendedNodeVersion, len(allObjects)-p.MaxKeys+1)
	session.Next[0] = allObjects[p.MaxKeys-1]
	for i, node := range allObjects[p.MaxKeys:] {
		session.Next[i+1] = node
	}

	cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, session.Next[0].NodeVersion.OID.EncodeToString())
	n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
}

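// nodesGeneratorStream reads latest object versions from the session stream and sends them to the
// returned channel, skipping delete markers and already seen names, until enough items for one
// page (plus one extra to detect truncation) are produced.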
func nodesGeneratorStream(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
	nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
	errCh := make(chan error, 1)
	existed := stream.NamesMap

	if len(stream.Next) != 0 {
		existed[continuationToken] = struct{}{}
	}

	limit := p.MaxKeys
	if len(stream.Next) == 0 {
		limit++
	}

	go func() {
		var generated int
		var err error

	LOOP:
		for err == nil {
			node, err := stream.Stream.Next(ctx)
			if err != nil {
				if !errors.Is(err, io.EOF) {
					errCh <- fmt.Errorf("stream next: %w", err)
				}
				break LOOP
			}

			nodeExt := &data.ExtendedNodeVersion{
				NodeVersion: node,
				IsLatest:    true,
				DirName:     tryDirectoryName(node, p.Prefix, p.Delimiter),
			}

			if shouldSkip(nodeExt, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- nodeExt:
				generated++

				if generated == limit { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
		close(errCh)
	}()

	return nodeCh, errCh
}

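// nodesGeneratorVersions streams object versions to the returned channel, taking items carried
// over in the session first and then reading from the tree stream; it stops after a full page
// once a new object name begins.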
func nodesGeneratorVersions(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
	nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
	errCh := make(chan error, 1)
	existed := stream.NamesMap

	delete(existed, continuationToken)

	go func() {
		var (
			generated int
			ind       int
			err       error
			lastName  string
			node      *data.NodeVersion
			nodeExt   *data.ExtendedNodeVersion
		)

	LOOP:
		for err == nil {
			if ind < len(stream.Next) {
				nodeExt = stream.Next[ind]
				ind++
			} else {
				node, err = stream.Stream.Next(ctx)
				if err != nil {
					if !errors.Is(err, io.EOF) {
						errCh <- fmt.Errorf("stream next: %w", err)
					}
					break LOOP
				}

				nodeExt = &data.ExtendedNodeVersion{
					NodeVersion: node,
					DirName:     tryDirectoryName(node, p.Prefix, p.Delimiter),
				}
			}

			if shouldSkipVersions(nodeExt, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- nodeExt:
				generated++
				if generated > p.MaxKeys && nodeExt.NodeVersion.FilePath != lastName {
					break LOOP
				}
				lastName = nodeExt.NodeVersion.FilePath
			}
		}
		close(nodeCh)
		close(errCh)
	}()

	return nodeCh, errCh
}

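// initWorkerPool starts a pool of size workers that enrich node versions with object metadata
// (owner, creation time, size) and forward them to the returned channel.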
func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsListingParams, input <-chan *data.ExtendedNodeVersion) (<-chan *data.ExtendedNodeVersion, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ExtendedNodeVersion, size)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			if node.DirName != "" || node.NodeVersion.IsFilledExtra() {
				select {
				case <-ctx.Done():
				case objCh <- node:
				}
			} else {
				// We have to make a copy of the pointer to data.ExtendedNodeVersion
				// to get the correct value in the submitted task function.
				func(node *data.ExtendedNodeVersion) {
					wg.Add(1)
					err = pool.Submit(func() {
						defer wg.Done()

						oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.BktInfo, node.NodeVersion)
						if oi == nil {
							// try to get the object again
							if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.BktInfo, node.NodeVersion); oi == nil {
								// do not process objects that are definitely missing in the object service
								return
							}
						}

						realSize, err := GetObjectSize(oi)
						if err != nil {
							reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err))
							realSize = oi.Size
						}

						node.NodeVersion.FillExtra(&oi.Owner, &oi.Created, realSize)

						select {
						case <-ctx.Done():
						case objCh <- node:
						}
					})
					if err != nil {
						wg.Done()
						reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
					}
				}(node)
			}
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}

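// shouldSkip reports whether the node must be omitted from a latest-versions listing:
// delete markers, already seen names, names not after the marker, and entries before the
// continuation bookmark are skipped.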
func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
	if node.NodeVersion.IsDeleteMarker {
		return true
	}

	filePath := node.NodeVersion.FilePath
	if node.DirName != "" {
		filePath = node.DirName
	}

	if _, ok := existed[filePath]; ok {
		return true
	}

	if filePath <= p.Marker {
		return true
	}

	if p.Bookmark != "" && p.Bookmark != p.Marker {
		if _, ok := existed[continuationToken]; !ok {
			if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}

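// shouldSkipVersions reports whether the version must be omitted from an all-versions listing
// based on already seen directory names, the key marker, and the version ID bookmark.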
func shouldSkipVersions(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
	filePath := node.NodeVersion.FilePath
	if node.DirName != "" {
		filePath = node.DirName
		if _, ok := existed[filePath]; ok {
			return true
		}
	}

	if filePath < p.Marker {
		return true
	}

	if p.Bookmark != "" {
		if _, ok := existed[continuationToken]; !ok {
			if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
			return true
		}
	}

	existed[filePath] = struct{}{}
	return false
}

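// triageExtendedObjects splits the listing into common prefixes (directories) and regular objects.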
func triageExtendedObjects(allObjects []*data.ExtendedNodeVersion) (prefixes []string, objects []*data.ExtendedNodeVersion) {
	for _, ov := range allObjects {
		if ov.DirName != "" {
			prefixes = append(prefixes, ov.DirName)
		} else {
			objects = append(objects, ov)
		}
	}

	return
}

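// objectInfoFromObjectsCacheOrFrostFS returns object info from the cache or requests the object
// head from FrostFS (caching the result); it returns nil if the head request fails.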
func (n *Layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion) (oi *data.ObjectInfo) {
	owner := n.BearerOwner(ctx)
	if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
		return extInfo.ObjectInfo
	}

	meta, err := n.objectHead(ctx, bktInfo, node.OID)
	if err != nil {
		n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
		return nil
	}

	oi = objectInfoFromMeta(bktInfo, meta)
	oi.MD5Sum = node.MD5
	n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})

	return oi
}

// tryDirectoryName forms a directory name from the prefix and delimiter.
// If the node isn't a directory, an empty string is returned.
// This function doesn't check whether the node has the prefix; the caller must do that.
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
	if len(delimiter) == 0 {
		return ""
	}

	tail := strings.TrimPrefix(node.FilePath, prefix)
	index := strings.Index(tail, delimiter)
	if index >= 0 {
		return prefix + tail[:index+1]
	}

	return ""
}

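// filterVersionsByMarker skips versions up to and including the KeyMarker/VersionIDMarker
// position and returns ErrInvalidVersion if the markers don't point to an existing version.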
func filterVersionsByMarker(objects []*data.ExtendedNodeVersion, p *ListObjectVersionsParams) ([]*data.ExtendedNodeVersion, error) {
	if p.KeyMarker == "" {
		return objects, nil
	}

	for i, obj := range objects {
		if obj.NodeVersion.FilePath == p.KeyMarker {
			for j := i; j < len(objects); j++ {
				if objects[j].NodeVersion.FilePath != obj.NodeVersion.FilePath {
					if p.VersionIDMarker == "" {
						return objects[j:], nil
					}
					break
				}
				if objects[j].NodeVersion.OID.EncodeToString() == p.VersionIDMarker {
					return objects[j+1:], nil
				}
			}
			return nil, apierr.GetAPIError(apierr.ErrInvalidVersion)
		} else if obj.NodeVersion.FilePath > p.KeyMarker {
			if p.VersionIDMarker != "" {
				return nil, apierr.GetAPIError(apierr.ErrInvalidVersion)
			}
			return objects[i:], nil
		}
	}

	// Don't use nil as the empty slice to be consistent with `return objects[j+1:], nil` above,
	// which can also be empty.
	return []*data.ExtendedNodeVersion{}, nil
}

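// triageVersions splits versions into regular object versions and delete markers.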
func triageVersions(objVersions []*data.ExtendedNodeVersion) ([]*data.ExtendedNodeVersion, []*data.ExtendedNodeVersion) {
	if len(objVersions) == 0 {
		return nil, nil
	}

	var resVersion []*data.ExtendedNodeVersion
	var resDelMarkVersions []*data.ExtendedNodeVersion

	for _, version := range objVersions {
		if version.NodeVersion.IsDeleteMarker {
			resDelMarkVersions = append(resDelMarkVersions, version)
		} else {
			resVersion = append(resVersion, version)
		}
	}

	return resVersion, resDelMarkVersions
}