package layer

import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/minio/sio"
	"github.com/panjf2000/ants/v2"
	"go.uber.org/zap"
)

type (
	getParams struct {
		// payload range
		off, ln uint64

		objInfo *data.ObjectInfo
		bktInfo *data.BucketInfo
	}

	getFrostFSParams struct {
		// payload range
		off, ln uint64

		oid     oid.ID
		bktInfo *data.BucketInfo
	}

	// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
	ListObjectsParamsCommon struct {
		BktInfo   *data.BucketInfo
		Delimiter string
		Encode    string
		MaxKeys   int
		Prefix    string
	}

	// ListObjectsParamsV1 contains params for ListObjectsV1.
	ListObjectsParamsV1 struct {
		ListObjectsParamsCommon
		Marker string
	}

	// ListObjectsParamsV2 contains params for ListObjectsV2.
	ListObjectsParamsV2 struct {
		ListObjectsParamsCommon
		ContinuationToken string
		StartAfter        string
		FetchOwner        bool
	}

	allObjectParams struct {
		Bucket            *data.BucketInfo
		Delimiter         string
		Prefix            string
		MaxKeys           int
		Marker            string
		ContinuationToken string
	}

	// DeleteMarkerError is returned when the requested object version is a delete marker.
	DeleteMarkerError struct {
		ErrorCode apiErrors.ErrorCode
	}
)

func (e DeleteMarkerError) Error() string {
	return "object is delete marker"
}

const (
	// continuationToken is a sentinel key stored in the listing "existed" set
	// to note that the version referenced by the continuation token has been reached.
	continuationToken = "<continuation-token>"
)

func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnr)
	addr.SetObject(obj)
	return addr
}

// objectHead returns all object's headers.
func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) {
	prm := PrmObjectRead{
		Container:  bktInfo.CID,
		Object:     idObj,
		WithHeader: true,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	res, err := n.frostFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Head, nil
}

// initObjectPayloadReader returns a payload reader for the object.
// Regular objects are read directly; combined (multipart) objects carry their
// part list in the payload, so it is unmarshaled and a multi-object reader
// over all parts is returned.
func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
	if _, isCombined := p.objInfo.Headers[MultipartObjectSize]; !isCombined {
		return n.initFrostFSObjectPayloadReader(ctx, getFrostFSParams{
			off:     p.off,
			ln:      p.ln,
			oid:     p.objInfo.ID,
			bktInfo: p.bktInfo,
		})
	}

	combinedObj, err := n.objectGet(ctx, p.bktInfo, p.objInfo.ID)
	if err != nil {
		return nil, fmt.Errorf("get combined object '%s': %w", p.objInfo.ID.EncodeToString(), err)
	}

	var parts []*data.PartInfo
	if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
	}

	isEncrypted := FormEncryptionInfo(p.objInfo.Headers).Enabled
	objParts := make([]partObj, len(parts))
	for i, part := range parts {
		size := part.Size
		if isEncrypted {
			if size, err = sio.EncryptedSize(part.Size); err != nil {
				return nil, fmt.Errorf("compute encrypted size: %w", err)
			}
		}

		objParts[i] = partObj{
			OID:  part.OID,
			Size: size,
		}
	}

	return newMultiObjectReader(ctx, multiObjectReaderConfig{
		layer:   n,
		off:     p.off,
		ln:      p.ln,
		parts:   objParts,
		bktInfo: p.bktInfo,
	})
}

// initFrostFSObjectPayloadReader initializes the payload reader of the FrostFS object.
// Zero range corresponds to the full payload (panics if only the offset is set).
func (n *layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
	prm := PrmObjectRead{
		Container:    p.bktInfo.CID,
		Object:       p.oid,
		WithPayload:  true,
		PayloadRange: [2]uint64{p.off, p.ln},
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, p.bktInfo.Owner)

	res, err := n.frostFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Payload, nil
}

// objectGet returns an object with its payload.
func (n *layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*object.Object, error) {
	prm := PrmObjectRead{
		Container:   bktInfo.CID,
		Object:      objID,
		WithHeader:  true,
		WithPayload: true,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	res, err := n.frostFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Head, nil
}

// MimeByFilePath detects the MIME type by the file path extension.
func MimeByFilePath(path string) string {
	ext := filepath.Ext(path)
	if len(ext) == 0 {
		return ""
	}
	return mime.TypeByExtension(ext)
}
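
// Illustrative example: MimeByFilePath("index.html") yields
// "text/html; charset=utf-8" from Go's built-in extension table, while a path
// without an extension (or with an unknown one) yields "".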

func encryptionReader(r io.Reader, size uint64, key []byte) (io.Reader, uint64, error) {
	encSize, err := sio.EncryptedSize(size)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to compute enc size: %w", err)
	}

	r, err = sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, MaxVersion: sio.Version20, Key: key, CipherSuites: []byte{sio.AES_256_GCM}})
	if err != nil {
		return nil, 0, fmt.Errorf("couldn't create encrypter: %w", err)
	}

	return r, encSize, nil
}
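
// Note: sio's DARE format adds per-package authentication overhead, so the
// encrypted size returned above is larger than the plaintext size; callers
// must propagate the returned size (as PutObject does with p.Size) instead of
// reusing the original one.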

// ParseCompletedPartHeader parses a completed part header of the form
// "<part-number>-<part-size>-<checksum>".
func ParseCompletedPartHeader(hdr string) (*Part, error) {
	// partInfo[0] -- part number, partInfo[1] -- part size, partInfo[2] -- checksum
	partInfo := strings.Split(hdr, "-")
	if len(partInfo) != 3 {
		return nil, errors.New("invalid completed part header")
	}
	num, err := strconv.Atoi(partInfo[0])
	if err != nil {
		return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
	}
	size, err := strconv.ParseUint(partInfo[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
	}

	return &Part{
		ETag:       partInfo[2],
		PartNumber: num,
		Size:       size,
	}, nil
}
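
// Illustrative example (hypothetical values): a header "2-5242880-<etag>"
// parses to PartNumber=2, Size=5242880 and ETag "<etag>".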

// PutObject stores the object into FrostFS, taking the payload from io.Reader.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
	bktSettings, err := n.GetBucketSettings(ctx, p.BktInfo)
	if err != nil {
		return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
	}

	r := p.Reader
	if p.Encryption.Enabled() {
		p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
		if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
			return nil, fmt.Errorf("add encryption header: %w", err)
		}

		var encSize uint64
		if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
			return nil, fmt.Errorf("create encrypter: %w", err)
		}
		p.Size = encSize
	}

	if r != nil {
		if len(p.Header[api.ContentType]) == 0 {
			if contentType := MimeByFilePath(p.Object); len(contentType) == 0 {
				d := newDetector(r)
				if contentType, err := d.Detect(); err == nil {
					p.Header[api.ContentType] = contentType
				}
				r = d.MultiReader()
			} else {
				p.Header[api.ContentType] = contentType
			}
		}
	}

	prm := PrmObjectCreate{
		Container:    p.BktInfo.CID,
		PayloadSize:  p.Size,
		Filepath:     p.Object,
		Payload:      r,
		CreationTime: TimeNow(ctx),
		CopiesNumber: p.CopiesNumbers,
	}

	prm.Attributes = make([][2]string, 0, len(p.Header))

	for k, v := range p.Header {
		prm.Attributes = append(prm.Attributes, [2]string{k, v})
	}

	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
	if err != nil {
		return nil, err
	}
	if len(p.ContentMD5) > 0 {
		headerMd5Hash, err := base64.StdEncoding.DecodeString(p.ContentMD5)
		if err != nil {
			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
		}
		if !bytes.Equal(headerMd5Hash, md5Hash) {
			err = n.objectDelete(ctx, p.BktInfo, id)
			if err != nil {
				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
			}
			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
		}
	}

	if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
		contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
		if err != nil {
			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
		}
		if !bytes.Equal(contentHashBytes, hash) {
			err = n.objectDelete(ctx, p.BktInfo, id)
			if err != nil {
				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
			}
			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
		}
	}

	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))

	newVersion := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			OID:      id,
			ETag:     hex.EncodeToString(hash),
			FilePath: p.Object,
			Size:     size,
		},
		IsUnversioned: !bktSettings.VersioningEnabled(),
		IsCombined:    p.Header[MultipartObjectSize] != "",
	}
	if len(p.CompleteMD5Hash) > 0 {
		newVersion.MD5 = p.CompleteMD5Hash
	} else {
		newVersion.MD5 = hex.EncodeToString(md5Hash)
	}

	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
	}

	if p.Lock != nil && (p.Lock.Retention != nil || p.Lock.LegalHold != nil) {
		putLockInfoPrms := &PutLockInfoParams{
			ObjVersion: &ObjectVersion{
				BktInfo:    p.BktInfo,
				ObjectName: p.Object,
				VersionID:  id.EncodeToString(),
			},
			NewLock:       p.Lock,
			CopiesNumbers: p.CopiesNumbers,
			NodeVersion:   newVersion, // provide the new version to make one less tree service call in PutLockInfo
		}

		if err = n.PutLockInfo(ctx, putLockInfoPrms); err != nil {
			return nil, err
		}
	}

	n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID)

	objInfo := &data.ObjectInfo{
		ID:  id,
		CID: p.BktInfo.CID,

		Owner:       n.gateOwner,
		Bucket:      p.BktInfo.Name,
		Name:        p.Object,
		Size:        size,
		Created:     prm.CreationTime,
		Headers:     p.Header,
		ContentType: p.Header[api.ContentType],
		HashSum:     newVersion.ETag,
		MD5Sum:      newVersion.MD5,
	}

	extendedObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: newVersion,
	}

	n.cache.PutObjectWithName(n.BearerOwner(ctx), extendedObjInfo)

	return extendedObjInfo, nil
}
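
// Note on the integrity checks in PutObject above: the Content-MD5 header is
// base64-encoded while the content SHA-256 is hex-encoded, which is why the
// two branches decode the client-supplied values differently before comparing
// them with the hashes computed from the streamed payload.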

func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
	owner := n.BearerOwner(ctx)
	if extObjInfo := n.cache.GetLastObject(owner, bkt.Name, objectName); extObjInfo != nil {
		return extObjInfo, nil
	}

	node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
	if err != nil {
		if errors.Is(err, ErrNodeNotFound) {
			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
		}
		return nil, err
	}

	if node.IsDeleteMarker() {
		return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrNoSuchKey}
	}

	meta, err := n.objectHead(ctx, bkt, node.OID)
	if err != nil {
		if client.IsErrObjectNotFound(err) {
			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
		}
		return nil, err
	}
	objInfo := objectInfoFromMeta(bkt, meta)
	objInfo.MD5Sum = node.MD5

	extObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: node,
	}

	n.cache.PutObjectWithName(owner, extObjInfo)

	return extObjInfo, nil
}

func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
	var err error
	var foundVersion *data.NodeVersion
	if p.VersionID == data.UnversionedObjectVersionID {
		foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
		if err != nil {
			if errors.Is(err, ErrNodeNotFound) {
				return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
			}
			return nil, err
		}
	} else {
		versions, err := n.treeService.GetVersions(ctx, bkt, p.Object)
		if err != nil {
			return nil, fmt.Errorf("couldn't get versions: %w", err)
		}

		for _, version := range versions {
			if version.OID.EncodeToString() == p.VersionID {
				foundVersion = version
				break
			}
		}
		if foundVersion == nil {
			return nil, fmt.Errorf("%w: there isn't a tree node with the requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
		}
	}

	owner := n.BearerOwner(ctx)
	if extObjInfo := n.cache.GetObject(owner, newAddress(bkt.CID, foundVersion.OID)); extObjInfo != nil {
		return extObjInfo, nil
	}

	if foundVersion.IsDeleteMarker() {
		return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrMethodNotAllowed}
	}

	meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
	if err != nil {
		if client.IsErrObjectNotFound(err) {
			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
		}
		return nil, err
	}
	objInfo := objectInfoFromMeta(bkt, meta)
	objInfo.MD5Sum = foundVersion.MD5

	extObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: foundVersion,
	}

	n.cache.PutObject(owner, extObjInfo)

	return extObjInfo, nil
}

// objectDelete puts a tombstone object into FrostFS.
func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
	prm := PrmObjectDelete{
		Container: bktInfo.CID,
		Object:    idObj,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	n.cache.DeleteObject(newAddress(bktInfo.CID, idObj))

	return n.frostFS.DeleteObject(ctx, prm)
}

// objectPutAndHash prepares auth parameters and invokes frostfs.CreateObject.
// Returns the written payload size, the object ID, and the payload's SHA-256 and MD5 hashes.
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
	prm.ClientCut = n.features.ClientCut()
	prm.BufferMaxSize = n.features.BufferMaxSizeForPut()
	prm.WithoutHomomorphicHash = bktInfo.HomomorphicHashDisabled
	var size uint64
	hash := sha256.New()
	md5Hash := md5.New()
	prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
		size += uint64(len(buf))
		hash.Write(buf)
		md5Hash.Write(buf)
	})
	id, err := n.frostFS.CreateObject(ctx, prm)
	if err != nil {
		if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
			n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
		}

		return 0, oid.ID{}, nil, nil, err
	}
	return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
}
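
// Design note: objectPutAndHash computes the size and both digests on the fly
// by teeing the payload through wrapReader (defined at the bottom of this
// file), so the object is hashed in the same single pass that uploads it.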

// ListObjectsV1 returns objects in a bucket for requests of Version 1.
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
	var result ListObjectsInfoV1

	prm := allObjectParams{
		Bucket:    p.BktInfo,
		Delimiter: p.Delimiter,
		Prefix:    p.Prefix,
		MaxKeys:   p.MaxKeys,
		Marker:    p.Marker,
	}

	objects, next, err := n.getLatestObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		result.IsTruncated = true
		result.NextMarker = objects[len(objects)-1].Name
	}

	result.Prefixes, result.Objects = triageObjects(objects)

	return &result, nil
}

// ListObjectsV2 returns objects in a bucket for requests of Version 2.
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
	var result ListObjectsInfoV2

	prm := allObjectParams{
		Bucket:            p.BktInfo,
		Delimiter:         p.Delimiter,
		Prefix:            p.Prefix,
		MaxKeys:           p.MaxKeys,
		Marker:            p.StartAfter,
		ContinuationToken: p.ContinuationToken,
	}

	objects, next, err := n.getLatestObjectsVersionsV2(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		result.IsTruncated = true
		result.NextContinuationToken = next.ID.EncodeToString()
	}

	result.Prefixes, result.Objects = triageObjects(objects)

	return &result, nil
}

type logWrapper struct {
	log *zap.Logger
}

func (l *logWrapper) Printf(format string, args ...interface{}) {
	l.log.Info(fmt.Sprintf(format, args...))
}

// PrintMemUsage prints current memory statistics to stdout (debug helper).
func PrintMemUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// For info on each, see: https://golang.org/pkg/runtime/#MemStats
	fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
	fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
	fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
	fmt.Printf("\tNumGC = %v\n", m.NumGC)
}

func bToMb(b uint64) uint64 {
	return b / 1024 / 1024
}

func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
	if p.MaxKeys == 0 {
		return nil, nil, nil
	}

	owner := n.BearerOwner(ctx)
	cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
	nodeVersions := n.cache.GetList(owner, cacheKey)

	if nodeVersions == nil {
		nodeVersions, err = n.treeService.GetLatestVersionsByPrefix(ctx, p.Bucket, p.Prefix)
		if err != nil {
			return nil, nil, err
		}
		n.cache.PutList(owner, cacheKey, nodeVersions)
	}

	if len(nodeVersions) == 0 {
		return nil, nil, nil
	}

	sort.Slice(nodeVersions, func(i, j int) bool {
		return nodeVersions[i].FilePath < nodeVersions[j].FilePath
	})

	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	objOutCh, err := n.initWorkerPool(poolCtx, 2, p, nodesGenerator(poolCtx, p, nodeVersions))
	if err != nil {
		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
	}

	objects = make([]*data.ObjectInfo, 0, p.MaxKeys)

	for obj := range objOutCh {
		objects = append(objects, obj)
	}

	sort.Slice(objects, func(i, j int) bool {
		return objects[i].Name < objects[j].Name
	})

	if len(objects) > p.MaxKeys {
		next = objects[p.MaxKeys]
		objects = objects[:p.MaxKeys]
	}

	return
}

func (n *layer) getLatestObjectsVersionsV2(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
	if p.MaxKeys == 0 {
		return nil, nil, nil
	}

	owner := n.BearerOwner(ctx)
	cacheKey := cache.CreateListSessionCacheKey(p.Bucket.CID, p.Prefix, p.ContinuationToken)
	session := n.cache.GetListSession(owner, cacheKey)
	if session != nil {
		// after reading the next object from the stream in this session,
		// the cached value no longer matches the next token in the cache key
		n.cache.DeleteListSession(owner, cacheKey)
	} else {
		session = &data.ListSession{NamesMap: make(map[string]struct{})}
		session.Context, session.Cancel = context.WithCancel(context.Background())

		if bd, err := middleware.GetBoxData(ctx); err == nil {
			session.Context = middleware.SetBoxData(session.Context, bd)
		}

		session.Stream, err = n.treeService.GetLatestVersionsByPrefixStream(session.Context, p.Bucket, p.Prefix)
		if err != nil {
			return nil, nil, err
		}
	}

	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	generator, errorCh := nodesGeneratorStream(poolCtx, p, session)
	objOutCh, err := n.initWorkerPoolStream(poolCtx, 2, p, generator)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
	}

	objects = make([]*data.ObjectInfo, 0, p.MaxKeys+1)
	if session.Next != nil {
		objects = append(objects, session.Next)
	}

	for obj := range objOutCh {
		objects = append(objects, obj)
	}

	if err = <-errorCh; err != nil {
		return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
	}

	sort.Slice(objects, func(i, j int) bool {
		return objects[i].Name < objects[j].Name
	})

	if len(objects) > p.MaxKeys {
		next = objects[p.MaxKeys]
		objects = objects[:p.MaxKeys]
	}

	if next != nil {
		session.Next = next
		n.cache.PutListSession(owner, cache.CreateListSessionCacheKey(p.Bucket.CID, p.Prefix, next.VersionID()), session)
	}

	return
}
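
// Design note: the V2 listing keeps the tree stream alive between paged
// requests. When a page is truncated, the session (stream, processed-names
// map and the next object) is cached under a key derived from the next
// continuation token, so the follow-up request resumes the stream instead of
// re-listing the whole prefix.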

func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
	nodeCh := make(chan *data.NodeVersion)
	existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories

	go func() {
		var generated int
	LOOP:
		for _, node := range nodeVersions {
			if shouldSkip(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
	}()

	return nodeCh
}

func nodesGeneratorVersions(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
	nodeCh := make(chan *data.NodeVersion)
	existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories

	go func() {
		var generated int
	LOOP:
		for _, node := range nodeVersions {
			if shouldSkipVersions(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
	}()

	return nodeCh
}

func nodesGeneratorStream(ctx context.Context, p allObjectParams, stream *data.ListSession) (<-chan *data.NodeVersion, <-chan error) {
	nodeCh := make(chan *data.NodeVersion, 1000)
	errCh := make(chan error, 1)
	existed := stream.NamesMap // to squash the same directories

	if stream.Next != nil {
		existed[continuationToken] = struct{}{}
	}

	// we read maxKeys+1 nodes to be able to know nextMarker/nextContinuationToken
	limit := p.MaxKeys
	if stream.Next == nil {
		limit++
	}

	go func() {
		var generated int
		var err error

	LOOP:
		for err == nil {
			var node *data.NodeVersion
			node, err = stream.Stream.Next(ctx)
			if err != nil {
				if !errors.Is(err, io.EOF) {
					errCh <- fmt.Errorf("stream next: %w", err)
				}
				break LOOP
			}

			if shouldSkip(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++

				if generated == limit {
					break LOOP
				}
			}
		}
		close(nodeCh)
		close(errCh)
	}()

	return nodeCh, errCh
}
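
// Design note: nodeCh is buffered (1000 entries) so the tree stream can run
// ahead of the worker pool, and errCh is buffered (capacity 1) so the
// generator goroutine can record a failure and exit even though the caller
// reads the error only after draining the output channel.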

func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of the pointer to data.NodeVersion
			// to get the correct value in the submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()
					oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
					if oi == nil {
						// try to get the object again
						if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
							// do not process objects that are definitely missing in the object service
							return
						}
					}
					select {
					case <-ctx.Done():
					case objCh <- oi:
					}
				})
				if err != nil {
					wg.Done()
					reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}

func (n *layer) initWorkerPoolVersions(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ExtendedObjectInfo, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ExtendedObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of the pointer to data.NodeVersion
			// to get the correct value in the submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()

					oi := &data.ObjectInfo{}
					if node.IsDeleteMarker() { // delete marker does not match any object in FrostFS
						oi.ID = node.OID
						oi.Name = node.FilePath
						oi.Owner = node.DeleteMarker.Owner
						oi.Created = node.DeleteMarker.Created
						oi.IsDeleteMarker = true
					} else {
						oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
						if oi == nil {
							// try to get the object again
							if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
								// do not process objects that are definitely missing in the object service
								return
							}
						}
					}

					eoi := &data.ExtendedObjectInfo{
						ObjectInfo:  oi,
						NodeVersion: node,
					}

					select {
					case <-ctx.Done():
					case objCh <- eoi:
					}
				})
				if err != nil {
					wg.Done()
					reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}

func (n *layer) initWorkerPoolStream(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of the pointer to data.NodeVersion
			// to get the correct value in the submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()
					oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
					if oi == nil {
						// try to get the object again
						if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
							// do not process objects that are definitely missing in the object service
							return
						}
					}
					select {
					case <-ctx.Done():
					case objCh <- oi:
					}
				})
				if err != nil {
					wg.Done()
					reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}

func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
	var err error

	owner := n.BearerOwner(ctx)
	cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
	nodeVersions := n.cache.GetList(owner, cacheKey)

	if nodeVersions == nil {
		nodeVersions, err = n.treeService.GetAllVersionsByPrefix(ctx, bkt, prefix)
		if err != nil {
			return nil, fmt.Errorf("get all versions from tree service: %w", err)
		}

		n.cache.PutList(owner, cacheKey, nodeVersions)
	}

	return nodeVersions, nil
}

func (n *layer) getAllObjectsVersions(ctx context.Context, p *ListObjectVersionsParams) (map[string][]*data.ExtendedObjectInfo, error) {
	nodeVersions, err := n.bucketNodeVersions(ctx, p.BktInfo, p.Prefix)
	if err != nil {
		return nil, err
	}

	versions := make(map[string][]*data.ExtendedObjectInfo, len(nodeVersions))

	sort.Slice(nodeVersions, func(i, j int) bool {
		return nodeVersions[i].FilePath < nodeVersions[j].FilePath
	})

	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	pp := allObjectParams{
		Bucket:            p.BktInfo,
		Delimiter:         p.Delimiter,
		Prefix:            p.Prefix,
		Marker:            p.KeyMarker,
		ContinuationToken: p.VersionIDMarker,
		MaxKeys:           p.MaxKeys,
	}

	objOutCh, err := n.initWorkerPoolVersions(poolCtx, 2, pp, nodesGeneratorVersions(poolCtx, pp, nodeVersions))
	if err != nil {
		return nil, err
	}

	for eoi := range objOutCh {
		objVersions, ok := versions[eoi.ObjectInfo.Name]
		if !ok {
			objVersions = []*data.ExtendedObjectInfo{eoi}
		} else if !eoi.ObjectInfo.IsDir {
			objVersions = append(objVersions, eoi)
		}
		versions[eoi.ObjectInfo.Name] = objVersions
	}

	return versions, nil
}

// IsSystemHeader reports whether the key is a system metadata header.
func IsSystemHeader(key string) bool {
	_, ok := api.SystemMetadata[key]
	return ok || strings.HasPrefix(key, api.FrostFSSystemMetadataPrefix)
}

func shouldSkip(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
	if node.IsDeleteMarker() {
		return true
	}

	filePath := node.FilePath
	if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
		filePath = dirName
	}
	if _, ok := existed[filePath]; ok {
		return true
	}

	if filePath <= p.Marker {
		return true
	}

	if p.ContinuationToken != "" {
		if _, ok := existed[continuationToken]; !ok {
			if p.ContinuationToken != node.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}
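
// Unlike shouldSkip above, shouldSkipVersions (below) keeps delete markers,
// since version listings must report them, and it compares with "<" rather
// than "<=": an entry equal to the key marker may still carry further
// versions to return.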

func shouldSkipVersions(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
	filePath := node.FilePath
	if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
		filePath = dirName
		if _, ok := existed[filePath]; ok {
			return true
		}
	}

	if filePath < p.Marker {
		return true
	}

	if p.ContinuationToken != "" {
		if _, ok := existed[continuationToken]; !ok {
			if p.ContinuationToken != node.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}

func triageObjects(allObjects []*data.ObjectInfo) (prefixes []string, objects []*data.ObjectInfo) {
	for _, ov := range allObjects {
		if ov.IsDir {
			prefixes = append(prefixes, ov.Name)
		} else {
			objects = append(objects, ov)
		}
	}

	return
}

func triageExtendedObjects(allObjects []*data.ExtendedObjectInfo) (prefixes []string, objects []*data.ExtendedObjectInfo) {
	for _, ov := range allObjects {
		if ov.ObjectInfo.IsDir {
			prefixes = append(prefixes, ov.ObjectInfo.Name)
		} else {
			objects = append(objects, ov)
		}
	}

	return
}

func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
	if oiDir := tryDirectory(bktInfo, node, prefix, delimiter); oiDir != nil {
		return oiDir
	}

	owner := n.BearerOwner(ctx)
	if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
		return extInfo.ObjectInfo
	}

	meta, err := n.objectHead(ctx, bktInfo, node.OID)
	if err != nil {
		n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
		return nil
	}

	oi = objectInfoFromMeta(bktInfo, meta)
	oi.MD5Sum = node.MD5
	n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})

	return oi
}

func tryDirectory(bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) *data.ObjectInfo {
	dirName := tryDirectoryName(node, prefix, delimiter)
	if len(dirName) == 0 {
		return nil
	}

	return &data.ObjectInfo{
		ID:             node.OID, // to use it as continuation token
		CID:            bktInfo.CID,
		IsDir:          true,
		IsDeleteMarker: node.IsDeleteMarker(),
		Bucket:         bktInfo.Name,
		Name:           dirName,
	}
}

// tryDirectoryName forms a directory name from the prefix and delimiter.
// If the node isn't a directory, an empty string is returned.
// This function doesn't check whether the node has the prefix; the caller must do that.
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
	if len(delimiter) == 0 {
		return ""
	}

	tail := strings.TrimPrefix(node.FilePath, prefix)
	index := strings.Index(tail, delimiter)
	if index >= 0 {
		return prefix + tail[:index+1]
	}

	return ""
}
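
// Illustrative example: with prefix "docs/" and delimiter "/", a node whose
// FilePath is "docs/2023/report.txt" yields the directory name "docs/2023/",
// while "docs/readme.txt" yields "" (no delimiter after the prefix).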

func wrapReader(input io.Reader, bufSize int, f func(buf []byte)) io.Reader {
	if input == nil {
		return nil
	}

	r, w := io.Pipe()
	go func() {
		var buf = make([]byte, bufSize)
		for {
			n, err := input.Read(buf)
			if n > 0 {
				f(buf[:n])
				_, _ = w.Write(buf[:n]) // ignore error, input is not ReadCloser
			}
			if err != nil {
				_ = w.CloseWithError(err)
				break
			}
		}
	}()
	return r
}
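
// Usage sketch (illustrative, mirrors objectPutAndHash): observing bytes as
// they stream through a reader.
//
//	var total uint64
//	r := wrapReader(src, 64*1024, func(buf []byte) { total += uint64(len(buf)) })
//	_, _ = io.Copy(io.Discard, r) // total now holds the number of bytes read from src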