package layer

import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/minio/sio"
	"github.com/panjf2000/ants/v2"
	"go.uber.org/zap"
)

type (
	getParams struct {
		// payload range
		off, ln uint64

		objInfo *data.ObjectInfo
		bktInfo *data.BucketInfo
	}

	getFrostFSParams struct {
		// payload range
		off, ln uint64

		oid     oid.ID
		bktInfo *data.BucketInfo
	}

	// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
	ListObjectsParamsCommon struct {
		BktInfo   *data.BucketInfo
		Delimiter string
		Encode    string
		MaxKeys   int
		Prefix    string
	}

	// ListObjectsParamsV1 contains params for ListObjectsV1.
	ListObjectsParamsV1 struct {
		ListObjectsParamsCommon
		Marker string
	}

	// ListObjectsParamsV2 contains params for ListObjectsV2.
	ListObjectsParamsV2 struct {
		ListObjectsParamsCommon
		ContinuationToken string
		StartAfter        string
		FetchOwner        bool
	}

	allObjectParams struct {
		Bucket            *data.BucketInfo
		Delimiter         string
		Prefix            string
		MaxKeys           int
		Marker            string
		ContinuationToken string
	}

	DeleteMarkerError struct {
		ErrorCode apiErrors.ErrorCode
	}
)

func (e DeleteMarkerError) Error() string {
	return "object is delete marker"
}

const (
	continuationToken = ""
)

func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnr)
	addr.SetObject(obj)
	return addr
}
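// Illustrative sketch (not part of the gateway API; the helper name is
// hypothetical): callers can detect a delete-marker result with errors.As
// because DeleteMarkerError implements the error interface by value.
func isDeleteMarkerErr(err error) bool {
	var dm DeleteMarkerError
	return errors.As(err, &dm)
}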
// objectHead returns all object's headers.
func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) {
	prm := PrmObjectRead{
		Container:  bktInfo.CID,
		Object:     idObj,
		WithHeader: true,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	res, err := n.frostFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Head, nil
}

func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
	if _, isCombined := p.objInfo.Headers[MultipartObjectSize]; !isCombined {
		return n.initFrostFSObjectPayloadReader(ctx, getFrostFSParams{
			off:     p.off,
			ln:      p.ln,
			oid:     p.objInfo.ID,
			bktInfo: p.bktInfo,
		})
	}

	combinedObj, err := n.objectGet(ctx, p.bktInfo, p.objInfo.ID)
	if err != nil {
		return nil, fmt.Errorf("get combined object '%s': %w", p.objInfo.ID.EncodeToString(), err)
	}

	var parts []*data.PartInfo
	if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
	}

	isEncrypted := FormEncryptionInfo(p.objInfo.Headers).Enabled
	objParts := make([]partObj, len(parts))
	for i, part := range parts {
		size := part.Size
		if isEncrypted {
			if size, err = sio.EncryptedSize(part.Size); err != nil {
				return nil, fmt.Errorf("compute encrypted size: %w", err)
			}
		}

		objParts[i] = partObj{
			OID:  part.OID,
			Size: size,
		}
	}

	return newMultiObjectReader(ctx, multiObjectReaderConfig{
		layer:   n,
		off:     p.off,
		ln:      p.ln,
		parts:   objParts,
		bktInfo: p.bktInfo,
	})
}

// initFrostFSObjectPayloadReader initializes the payload reader of the FrostFS object.
// Zero range corresponds to full payload (panics if only offset is set).
func (n *layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
	prm := PrmObjectRead{
		Container:    p.bktInfo.CID,
		Object:       p.oid,
		WithPayload:  true,
		PayloadRange: [2]uint64{p.off, p.ln},
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, p.bktInfo.Owner)

	res, err := n.frostFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Payload, nil
}

// objectGet returns an object with its payload.
func (n *layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*object.Object, error) {
	prm := PrmObjectRead{
		Container:   bktInfo.CID,
		Object:      objID,
		WithHeader:  true,
		WithPayload: true,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	res, err := n.frostFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Head, nil
}
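// Illustrative sketch (not used by the code above; the function name is
// hypothetical): how sio reports the stored size of an encrypted part, as done
// per part in initObjectPayloadReader. The 5 MiB part size is an assumption
// chosen only for demonstration.
func exampleEncryptedPartSize() (uint64, error) {
	const plainPartSize = 5 << 20 // hypothetical 5 MiB part
	return sio.EncryptedSize(plainPartSize)
}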
// MimeByFilePath detects the MIME type by the file path extension.
func MimeByFilePath(path string) string {
	ext := filepath.Ext(path)
	if len(ext) == 0 {
		return ""
	}
	return mime.TypeByExtension(ext)
}

func encryptionReader(r io.Reader, size uint64, key []byte) (io.Reader, uint64, error) {
	encSize, err := sio.EncryptedSize(size)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to compute enc size: %w", err)
	}

	r, err = sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, MaxVersion: sio.Version20, Key: key, CipherSuites: []byte{sio.AES_256_GCM}})
	if err != nil {
		return nil, 0, fmt.Errorf("couldn't create encrypter: %w", err)
	}

	return r, encSize, nil
}

func ParseCompletedPartHeader(hdr string) (*Part, error) {
	// partInfo[0] -- part number, partInfo[1] -- part size, partInfo[2] -- checksum
	partInfo := strings.Split(hdr, "-")
	if len(partInfo) != 3 {
		return nil, fmt.Errorf("invalid completed part header")
	}

	num, err := strconv.Atoi(partInfo[0])
	if err != nil {
		return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
	}

	size, err := strconv.ParseUint(partInfo[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
	}

	return &Part{
		ETag:       partInfo[2],
		PartNumber: num,
		Size:       size,
	}, nil
}
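// Illustrative sketch: a completed part header has the form
// "<number>-<size>-<checksum>". The sample value below is hypothetical.
func exampleParseCompletedPart() (*Part, error) {
	// Expect PartNumber == 2, Size == 5242880, ETag == "deadbeef".
	return ParseCompletedPartHeader("2-5242880-deadbeef")
}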
// PutObject stores an object in FrostFS, taking the payload from io.Reader.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
	bktSettings, err := n.GetBucketSettings(ctx, p.BktInfo)
	if err != nil {
		return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
	}

	r := p.Reader
	if p.Encryption.Enabled() {
		p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
		if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
			return nil, fmt.Errorf("add encryption header: %w", err)
		}

		var encSize uint64
		if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
			return nil, fmt.Errorf("create encrypter: %w", err)
		}
		p.Size = encSize
	}

	if r != nil {
		if len(p.Header[api.ContentType]) == 0 {
			if contentType := MimeByFilePath(p.Object); len(contentType) == 0 {
				d := newDetector(r)
				if contentType, err := d.Detect(); err == nil {
					p.Header[api.ContentType] = contentType
				}
				r = d.MultiReader()
			} else {
				p.Header[api.ContentType] = contentType
			}
		}
	}

	prm := PrmObjectCreate{
		Container:    p.BktInfo.CID,
		PayloadSize:  p.Size,
		Filepath:     p.Object,
		Payload:      r,
		CreationTime: TimeNow(ctx),
		CopiesNumber: p.CopiesNumbers,
	}

	prm.Attributes = make([][2]string, 0, len(p.Header))

	for k, v := range p.Header {
		prm.Attributes = append(prm.Attributes, [2]string{k, v})
	}

	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
	if err != nil {
		return nil, err
	}

	if len(p.ContentMD5) > 0 {
		headerMd5Hash, err := base64.StdEncoding.DecodeString(p.ContentMD5)
		if err != nil {
			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
		}
		if !bytes.Equal(headerMd5Hash, md5Hash) {
			err = n.objectDelete(ctx, p.BktInfo, id)
			if err != nil {
				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
			}
			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
		}
	}

	if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
		contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
		if err != nil {
			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
		}
		if !bytes.Equal(contentHashBytes, hash) {
			err = n.objectDelete(ctx, p.BktInfo, id)
			if err != nil {
				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
			}
			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
		}
	}

	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))

	newVersion := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			OID:      id,
			ETag:     hex.EncodeToString(hash),
			FilePath: p.Object,
			Size:     size,
		},
		IsUnversioned: !bktSettings.VersioningEnabled(),
		IsCombined:    p.Header[MultipartObjectSize] != "",
	}

	if len(p.CompleteMD5Hash) > 0 {
		newVersion.MD5 = p.CompleteMD5Hash
	} else {
		newVersion.MD5 = hex.EncodeToString(md5Hash)
	}

	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
	}

	if p.Lock != nil && (p.Lock.Retention != nil || p.Lock.LegalHold != nil) {
		putLockInfoPrms := &PutLockInfoParams{
			ObjVersion: &ObjectVersion{
				BktInfo:    p.BktInfo,
				ObjectName: p.Object,
				VersionID:  id.EncodeToString(),
			},
			NewLock:       p.Lock,
			CopiesNumbers: p.CopiesNumbers,
			NodeVersion:   newVersion, // provide new version to make one less tree service call in PutLockInfo
		}

		if err = n.PutLockInfo(ctx, putLockInfoPrms); err != nil {
			return nil, err
		}
	}

	n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID)

	objInfo := &data.ObjectInfo{
		ID:  id,
		CID: p.BktInfo.CID,

		Owner:       n.gateOwner,
		Bucket:      p.BktInfo.Name,
		Name:        p.Object,
		Size:        size,
		Created:     prm.CreationTime,
		Headers:     p.Header,
		ContentType: p.Header[api.ContentType],
		HashSum:     newVersion.ETag,
		MD5Sum:      newVersion.MD5,
	}

	extendedObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: newVersion,
	}

	n.cache.PutObjectWithName(n.BearerOwner(ctx), extendedObjInfo)

	return extendedObjInfo, nil
}
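// Illustrative sketch (the helper name is hypothetical): the Content-MD5
// validation performed inside PutObject above, shown standalone. The header
// carries a base64-encoded digest that must match the MD5 computed over the
// stored payload; on mismatch, PutObject removes the just-written object.
func md5HeaderMatches(contentMD5 string, computed []byte) bool {
	want, err := base64.StdEncoding.DecodeString(contentMD5)
	if err != nil {
		return false
	}
	return bytes.Equal(want, computed)
}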
func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
	owner := n.BearerOwner(ctx)
	if extObjInfo := n.cache.GetLastObject(owner, bkt.Name, objectName); extObjInfo != nil {
		return extObjInfo, nil
	}

	node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
	if err != nil {
		if errors.Is(err, ErrNodeNotFound) {
			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
		}
		return nil, err
	}

	if node.IsDeleteMarker() {
		return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrNoSuchKey}
	}

	meta, err := n.objectHead(ctx, bkt, node.OID)
	if err != nil {
		if client.IsErrObjectNotFound(err) {
			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
		}
		return nil, err
	}

	objInfo := objectInfoFromMeta(bkt, meta)
	objInfo.MD5Sum = node.MD5
	extObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: node,
	}

	n.cache.PutObjectWithName(owner, extObjInfo)

	return extObjInfo, nil
}

func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
	var err error
	var foundVersion *data.NodeVersion
	if p.VersionID == data.UnversionedObjectVersionID {
		foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
		if err != nil {
			if errors.Is(err, ErrNodeNotFound) {
				return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
			}
			return nil, err
		}
	} else {
		versions, err := n.treeService.GetVersions(ctx, bkt, p.Object)
		if err != nil {
			return nil, fmt.Errorf("couldn't get versions: %w", err)
		}

		for _, version := range versions {
			if version.OID.EncodeToString() == p.VersionID {
				foundVersion = version
				break
			}
		}
		if foundVersion == nil {
			return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
		}
	}

	owner := n.BearerOwner(ctx)
	if extObjInfo := n.cache.GetObject(owner, newAddress(bkt.CID, foundVersion.OID)); extObjInfo != nil {
		return extObjInfo, nil
	}

	if foundVersion.IsDeleteMarker() {
		return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrMethodNotAllowed}
	}

	meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
	if err != nil {
		if client.IsErrObjectNotFound(err) {
			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
		}
		return nil, err
	}

	objInfo := objectInfoFromMeta(bkt, meta)
	objInfo.MD5Sum = foundVersion.MD5
	extObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: foundVersion,
	}

	n.cache.PutObject(owner, extObjInfo)

	return extObjInfo, nil
}

// objectDelete puts a tombstone object into FrostFS.
func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
	prm := PrmObjectDelete{
		Container: bktInfo.CID,
		Object:    idObj,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	n.cache.DeleteObject(newAddress(bktInfo.CID, idObj))

	return n.frostFS.DeleteObject(ctx, prm)
}

// objectPutAndHash prepares auth parameters and invokes frostfs.CreateObject.
// It returns the written payload size, the object ID, and the payload SHA-256 and MD5 hashes.
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
	prm.ClientCut = n.features.ClientCut()
	prm.BufferMaxSize = n.features.BufferMaxSizeForPut()
	prm.WithoutHomomorphicHash = bktInfo.HomomorphicHashDisabled

	var size uint64
	hash := sha256.New()
	md5Hash := md5.New()
	prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
		size += uint64(len(buf))
		hash.Write(buf)
		md5Hash.Write(buf)
	})

	id, err := n.frostFS.CreateObject(ctx, prm)
	if err != nil {
		if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
			n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
		}
		return 0, oid.ID{}, nil, nil, err
	}
	return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
}

// ListObjectsV1 returns objects in a bucket for requests of Version 1.
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
	var result ListObjectsInfoV1

	prm := allObjectParams{
		Bucket:    p.BktInfo,
		Delimiter: p.Delimiter,
		Prefix:    p.Prefix,
		MaxKeys:   p.MaxKeys,
		Marker:    p.Marker,
	}

	objects, next, err := n.getLatestObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		result.IsTruncated = true
		result.NextMarker = objects[len(objects)-1].Name
	}

	result.Prefixes, result.Objects = triageObjects(objects)

	return &result, nil
}
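// Illustrative sketch (the function name is hypothetical): the tee-hashing
// pattern objectPutAndHash relies on. Draining the wrapped reader feeds both
// digests and counts the payload size as a side effect of the read callback.
func exampleHashingWrap(src io.Reader) (sha256Sum, md5Sum []byte, size uint64, err error) {
	h := sha256.New()
	m := md5.New()
	r := wrapReader(src, 64*1024, func(buf []byte) {
		size += uint64(len(buf))
		h.Write(buf)
		m.Write(buf)
	})
	if _, err = io.Copy(io.Discard, r); err != nil {
		return nil, nil, 0, err
	}
	return h.Sum(nil), m.Sum(nil), size, nil
}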
// ListObjectsV2 returns objects in a bucket for requests of Version 2.
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
	var result ListObjectsInfoV2

	prm := allObjectParams{
		Bucket:            p.BktInfo,
		Delimiter:         p.Delimiter,
		Prefix:            p.Prefix,
		MaxKeys:           p.MaxKeys,
		Marker:            p.StartAfter,
		ContinuationToken: p.ContinuationToken,
	}

	objects, next, err := n.getLatestObjectsVersionsV2(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		result.IsTruncated = true
		result.NextContinuationToken = next.ID.EncodeToString()
	}

	result.Prefixes, result.Objects = triageObjects(objects)

	return &result, nil
}

type logWrapper struct {
	log *zap.Logger
}

func (l *logWrapper) Printf(format string, args ...interface{}) {
	l.log.Info(fmt.Sprintf(format, args...))
}

// PrintMemUsage prints current memory statistics.
// For info on each field, see: https://golang.org/pkg/runtime/#MemStats
func PrintMemUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
	fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
	fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
	fmt.Printf("\tNumGC = %v\n", m.NumGC)
}

func bToMb(b uint64) uint64 {
	return b / 1024 / 1024
}

func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
	if p.MaxKeys == 0 {
		return nil, nil, nil
	}

	owner := n.BearerOwner(ctx)
	cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
	nodeVersions := n.cache.GetList(owner, cacheKey)

	if nodeVersions == nil {
		nodeVersions, err = n.treeService.GetLatestVersionsByPrefix(ctx, p.Bucket, p.Prefix)
		if err != nil {
			return nil, nil, err
		}
		n.cache.PutList(owner, cacheKey, nodeVersions)
	}

	if len(nodeVersions) == 0 {
		return nil, nil, nil
	}

	sort.Slice(nodeVersions, func(i, j int) bool {
		return nodeVersions[i].FilePath < nodeVersions[j].FilePath
	})

	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	objOutCh, err := n.initWorkerPool(poolCtx, 2, p, nodesGenerator(poolCtx, p, nodeVersions))
	if err != nil {
		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
	}

	objects = make([]*data.ObjectInfo, 0, p.MaxKeys)

	for obj := range objOutCh {
		objects = append(objects, obj)
	}

	sort.Slice(objects, func(i, j int) bool {
		return objects[i].Name < objects[j].Name
	})

	if len(objects) > p.MaxKeys {
		next = objects[p.MaxKeys]
		objects = objects[:p.MaxKeys]
	}

	return
}
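// Illustrative sketch (the helper name is hypothetical): the MaxKeys+1
// truncation used above. Fetching one extra element reveals whether the
// listing is truncated and which object becomes the next marker or
// continuation token.
func exampleTruncate(objects []*data.ObjectInfo, maxKeys int) (page []*data.ObjectInfo, next *data.ObjectInfo) {
	if len(objects) > maxKeys {
		return objects[:maxKeys], objects[maxKeys]
	}
	return objects, nil
}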
func (n *layer) getLatestObjectsVersionsV2(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
	if p.MaxKeys == 0 {
		return nil, nil, nil
	}

	owner := n.BearerOwner(ctx)
	cacheKey := cache.CreateListSessionCacheKey(p.Bucket.CID, p.Prefix, p.ContinuationToken)
	session := n.cache.GetListSession(owner, cacheKey)
	if session != nil {
		// after reading the next object from the stream in the session,
		// the current cache value no longer matches the next token in the cache key
		n.cache.DeleteListSession(owner, cacheKey)
	} else {
		session = &data.ListSession{NamesMap: make(map[string]struct{})}
		session.Context, session.Cancel = context.WithCancel(context.Background())

		if bd, err := middleware.GetBoxData(ctx); err == nil {
			session.Context = middleware.SetBoxData(session.Context, bd)
		}

		session.Stream, err = n.treeService.GetLatestVersionsByPrefixStream(session.Context, p.Bucket, p.Prefix)
		if err != nil {
			return nil, nil, err
		}
	}

	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	generator, errorCh := nodesGeneratorStream(poolCtx, p, session)
	objOutCh, err := n.initWorkerPoolStream(poolCtx, 2, p, generator)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
	}

	objects = make([]*data.ObjectInfo, 0, p.MaxKeys+1)
	if session.Next != nil {
		objects = append(objects, session.Next)
	}

	for obj := range objOutCh {
		objects = append(objects, obj)
	}

	if err = <-errorCh; err != nil {
		return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
	}

	sort.Slice(objects, func(i, j int) bool {
		return objects[i].Name < objects[j].Name
	})

	if len(objects) > p.MaxKeys {
		next = objects[p.MaxKeys]
		objects = objects[:p.MaxKeys]
	}

	if next != nil {
		session.Next = next
		n.cache.PutListSession(owner, cache.CreateListSessionCacheKey(p.Bucket.CID, p.Prefix, next.VersionID()), session)
	}

	return
}

func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
	nodeCh := make(chan *data.NodeVersion)
	existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories

	go func() {
		var generated int
	LOOP:
		for _, node := range nodeVersions {
			if shouldSkip(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
	}()

	return nodeCh
}

func nodesGeneratorVersions(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
	nodeCh := make(chan *data.NodeVersion)
	existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories

	go func() {
		var generated int
	LOOP:
		for _, node := range nodeVersions {
			if shouldSkipVersions(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
	}()

	return nodeCh
}

func nodesGeneratorStream(ctx context.Context, p allObjectParams, stream *data.ListSession) (<-chan *data.NodeVersion, <-chan error) {
	nodeCh := make(chan *data.NodeVersion, 1000)
	errCh := make(chan error, 1)
	existed := stream.NamesMap // to squash the same directories

	if stream.Next != nil {
		existed[continuationToken] = struct{}{}
	}

	limit := p.MaxKeys
	if stream.Next == nil {
		limit++
	}

	go func() {
		var generated int
	LOOP:
		for {
			node, err := stream.Stream.Next(ctx)
			if err != nil {
				if !errors.Is(err, io.EOF) {
					errCh <- fmt.Errorf("stream next: %w", err)
				}
				break LOOP
			}

			if shouldSkip(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == limit { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
		close(errCh)
	}()

	return nodeCh, errCh
}
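// Illustrative sketch (standalone; the function name is hypothetical): the
// bounded-generator pattern shared by the nodesGenerator* functions above. It
// emits at most limit values, stops early on context cancellation, and always
// closes the channel so consumers ranging over it terminate.
func exampleGenerator(ctx context.Context, items []string, limit int) <-chan string {
	out := make(chan string)
	go func() {
		defer close(out)
		var generated int
		for _, v := range items {
			select {
			case <-ctx.Done():
				return
			case out <- v:
				generated++
				if generated == limit {
					return
				}
			}
		}
	}()
	return out
}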
func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of the pointer to data.NodeVersion
			// to get the correct value in the submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()
					oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
					if oi == nil {
						// try to get the object again
						if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
							// do not process objects that are definitely missing from the object service
							return
						}
					}
					select {
					case <-ctx.Done():
					case objCh <- oi:
					}
				})
				if err != nil {
					wg.Done()
					reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}

func (n *layer) initWorkerPoolVersions(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ExtendedObjectInfo, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ExtendedObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of the pointer to data.NodeVersion
			// to get the correct value in the submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()

					oi := &data.ObjectInfo{}
					if node.IsDeleteMarker() { // delete marker does not match any object in FrostFS
						oi.ID = node.OID
						oi.Name = node.FilePath
						oi.Owner = node.DeleteMarker.Owner
						oi.Created = node.DeleteMarker.Created
						oi.IsDeleteMarker = true
					} else {
						oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
						if oi == nil {
							// try to get the object again
							if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
								// do not process objects that are definitely missing from the object service
								return
							}
						}
					}

					eoi := &data.ExtendedObjectInfo{
						ObjectInfo:  oi,
						NodeVersion: node,
					}

					select {
					case <-ctx.Done():
					case objCh <- eoi:
					}
				})
				if err != nil {
					wg.Done()
					reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}
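// Illustrative sketch (standalone; the helper name is hypothetical): the
// worker-pool shape used by the initWorkerPool* functions above. Tasks are
// submitted to an ants pool and tracked with a WaitGroup so the output channel
// is closed only after every submitted task has finished.
func examplePool(ctx context.Context, input <-chan string) (<-chan string, error) {
	pool, err := ants.NewPool(2)
	if err != nil {
		return nil, err
	}
	out := make(chan string)

	go func() {
		var wg sync.WaitGroup
		for v := range input {
			v := v // capture the loop variable for the submitted closure
			wg.Add(1)
			if err := pool.Submit(func() {
				defer wg.Done()
				select {
				case <-ctx.Done():
				case out <- strings.ToUpper(v):
				}
			}); err != nil {
				wg.Done() // Submit failed, so the task will never run
			}
		}
		wg.Wait()
		close(out)
		pool.Release()
	}()

	return out, nil
}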
func (n *layer) initWorkerPoolStream(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
	reqLog := n.reqLogger(ctx)
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
	if err != nil {
		return nil, fmt.Errorf("couldn't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of the pointer to data.NodeVersion
			// to get the correct value in the submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()
					oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
					if oi == nil {
						// try to get the object again
						if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
							// do not process objects that are definitely missing from the object service
							return
						}
					}
					select {
					case <-ctx.Done():
					case objCh <- oi:
					}
				})
				if err != nil {
					wg.Done()
					reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}

func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
	var err error

	owner := n.BearerOwner(ctx)
	cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
	nodeVersions := n.cache.GetList(owner, cacheKey)

	if nodeVersions == nil {
		nodeVersions, err = n.treeService.GetAllVersionsByPrefix(ctx, bkt, prefix)
		if err != nil {
			return nil, fmt.Errorf("get all versions from tree service: %w", err)
		}

		n.cache.PutList(owner, cacheKey, nodeVersions)
	}

	return nodeVersions, nil
}

func (n *layer) getAllObjectsVersions(ctx context.Context, p *ListObjectVersionsParams) (map[string][]*data.ExtendedObjectInfo, error) {
	nodeVersions, err := n.bucketNodeVersions(ctx, p.BktInfo, p.Prefix)
	if err != nil {
		return nil, err
	}

	versions := make(map[string][]*data.ExtendedObjectInfo, len(nodeVersions))

	sort.Slice(nodeVersions, func(i, j int) bool {
		return nodeVersions[i].FilePath < nodeVersions[j].FilePath
	})

	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	pp := allObjectParams{
		Bucket:            p.BktInfo,
		Delimiter:         p.Delimiter,
		Prefix:            p.Prefix,
		Marker:            p.KeyMarker,
		ContinuationToken: p.VersionIDMarker,
		MaxKeys:           p.MaxKeys,
	}

	objOutCh, err := n.initWorkerPoolVersions(poolCtx, 2, pp, nodesGeneratorVersions(poolCtx, pp, nodeVersions))
	if err != nil {
		return nil, err
	}

	for eoi := range objOutCh {
		objVersions, ok := versions[eoi.ObjectInfo.Name]
		if !ok {
			objVersions = []*data.ExtendedObjectInfo{eoi}
		} else if !eoi.ObjectInfo.IsDir {
			objVersions = append(objVersions, eoi)
		}
		versions[eoi.ObjectInfo.Name] = objVersions
	}

	return versions, nil
}

func IsSystemHeader(key string) bool {
	_, ok := api.SystemMetadata[key]
	return ok || strings.HasPrefix(key, api.FrostFSSystemMetadataPrefix)
}

func shouldSkip(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
	if node.IsDeleteMarker() {
		return true
	}

	filePath := node.FilePath
	if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
		filePath = dirName
	}
	if _, ok := existed[filePath]; ok {
		return true
	}

	if filePath <= p.Marker {
		return true
	}

	if p.ContinuationToken != "" {
		if _, ok := existed[continuationToken]; !ok {
			if p.ContinuationToken != node.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}

func shouldSkipVersions(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
	filePath := node.FilePath
	if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
		filePath = dirName
		if _, ok := existed[filePath]; ok {
			return true
		}
	}

	if filePath < p.Marker {
		return true
	}

	if p.ContinuationToken != "" {
		if _, ok := existed[continuationToken]; !ok {
			if p.ContinuationToken != node.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}
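// Illustrative sketch (the helper name is hypothetical): the marker rule
// implemented by shouldSkip above. With marker "b", keys "a" and "b" are
// skipped and listing resumes at "c", because keys <= marker are excluded.
func exampleAfterMarker(keys []string, marker string) []string {
	var out []string
	for _, k := range keys {
		if k <= marker {
			continue
		}
		out = append(out, k)
	}
	return out
}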
func triageObjects(allObjects []*data.ObjectInfo) (prefixes []string, objects []*data.ObjectInfo) {
	for _, ov := range allObjects {
		if ov.IsDir {
			prefixes = append(prefixes, ov.Name)
		} else {
			objects = append(objects, ov)
		}
	}

	return
}

func triageExtendedObjects(allObjects []*data.ExtendedObjectInfo) (prefixes []string, objects []*data.ExtendedObjectInfo) {
	for _, ov := range allObjects {
		if ov.ObjectInfo.IsDir {
			prefixes = append(prefixes, ov.ObjectInfo.Name)
		} else {
			objects = append(objects, ov)
		}
	}

	return
}

func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
	if oiDir := tryDirectory(bktInfo, node, prefix, delimiter); oiDir != nil {
		return oiDir
	}

	owner := n.BearerOwner(ctx)
	if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
		return extInfo.ObjectInfo
	}

	meta, err := n.objectHead(ctx, bktInfo, node.OID)
	if err != nil {
		n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
		return nil
	}

	oi = objectInfoFromMeta(bktInfo, meta)
	oi.MD5Sum = node.MD5
	n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})

	return oi
}

func tryDirectory(bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) *data.ObjectInfo {
	dirName := tryDirectoryName(node, prefix, delimiter)
	if len(dirName) == 0 {
		return nil
	}

	return &data.ObjectInfo{
		ID:             node.OID, // to use it as continuation token
		CID:            bktInfo.CID,
		IsDir:          true,
		IsDeleteMarker: node.IsDeleteMarker(),
		Bucket:         bktInfo.Name,
		Name:           dirName,
	}
}

// tryDirectoryName forms a directory name from the prefix and delimiter.
// If the node isn't a directory, an empty string is returned.
// This function doesn't check whether the node has the prefix; the caller must do that.
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
	if len(delimiter) == 0 {
		return ""
	}

	tail := strings.TrimPrefix(node.FilePath, prefix)
	index := strings.Index(tail, delimiter)
	if index >= 0 {
		return prefix + tail[:index+1]
	}

	return ""
}

// wrapReader pipes input through the callback f in bufSize chunks,
// letting f observe every chunk of the payload as it is read.
func wrapReader(input io.Reader, bufSize int, f func(buf []byte)) io.Reader {
	if input == nil {
		return nil
	}

	r, w := io.Pipe()
	go func() {
		var buf = make([]byte, bufSize)
		for {
			n, err := input.Read(buf)
			if n > 0 {
				f(buf[:n])
				_, _ = w.Write(buf[:n]) // ignore error, input is not ReadCloser
			}
			if err != nil {
				_ = w.CloseWithError(err)
				break
			}
		}
	}()

	return r
}
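// Illustrative sketch (the function name and sample key are hypothetical):
// how tryDirectoryName collapses keys under a common prefix. With prefix
// "photos/" and delimiter "/", the key below yields the directory name
// "photos/2023/".
func exampleDirectoryName() string {
	node := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{FilePath: "photos/2023/january/1.jpg"},
	}
	return tryDirectoryName(node, "photos/", "/")
}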