forked from TrueCloudLab/frostfs-s3-gw
[#165] Add batching in streaming listing
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
This commit is contained in:
parent 6e8960b2ab
commit b52552e8c2
4 changed files with 80 additions and 8 deletions
@@ -13,6 +13,7 @@ import (
 	"io"
 	"mime"
 	"path/filepath"
+	"runtime"
 	"sort"
 	"strconv"
 	"strings"
@@ -596,6 +597,20 @@ func (l *logWrapper) Printf(format string, args ...interface{}) {
 	l.log.Info(fmt.Sprintf(format, args...))
 }
 
+func PrintMemUsage() {
+	var m runtime.MemStats
+	runtime.ReadMemStats(&m)
+	// For info on each, see: https://golang.org/pkg/runtime/#MemStats
+	fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
+	fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
+	fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
+	fmt.Printf("\tNumGC = %v\n", m.NumGC)
+}
+
+func bToMb(b uint64) uint64 {
+	return b / 1024 / 1024
+}
+
 func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
 	if p.MaxKeys == 0 {
 		return nil, nil, nil
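The two helpers above are plain debug utilities; this hunk only defines them, and none of the shown hunks call them. A minimal standalone sketch of how such a helper can be used to watch allocations around a memory-heavy operation (the simulated allocation loop and the call sites are assumptions for illustration, not part of this commit):

	package main

	import (
		"fmt"
		"runtime"
	)

	// Local copies of the helpers added in this commit, so the sketch runs standalone.
	func PrintMemUsage() {
		var m runtime.MemStats
		runtime.ReadMemStats(&m)
		fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc))
		fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc))
		fmt.Printf("\tSys = %v MiB", bToMb(m.Sys))
		fmt.Printf("\tNumGC = %v\n", m.NumGC)
	}

	func bToMb(b uint64) uint64 { return b / 1024 / 1024 }

	func main() {
		PrintMemUsage() // baseline

		// Simulated memory-heavy listing: keep 64 MiB alive so Alloc visibly grows.
		kept := make([][]byte, 0, 64)
		for i := 0; i < 64; i++ {
			kept = append(kept, make([]byte, 1<<20))
		}
		PrintMemUsage() // Alloc should be roughly 64 MiB above the baseline

		runtime.KeepAlive(kept)
		runtime.GC()
		PrintMemUsage() // once the slice is no longer referenced and GC runs, NumGC increases
	}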
@@ -634,6 +649,20 @@ func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams)
 		objects = append(objects, obj)
 	}
 
+	//for node := range nodesGenerator(poolCtx, p, nodeVersions) {
+	//	objects = append(objects, &data.ObjectInfo{
+	//		ID:             node.OID,
+	//		IsDir:          false,
+	//		IsDeleteMarker: node.IsDeleteMarker(),
+	//		Name:           node.FilePath,
+	//		Size:           node.Size,
+	//		Created:        time.Time{},
+	//		HashSum:        node.ETag,
+	//		Owner:          user.ID{},
+	//		Headers:        nil,
+	//	})
+	//}
+
 	sort.Slice(objects, func(i, j int) bool {
 		return objects[i].Name < objects[j].Name
 	})
@@ -652,7 +681,7 @@ func (n *layer) getLatestObjectsVersionsV2(ctx context.Context, p allObjectParams)
 	}
 
 	owner := n.BearerOwner(ctx)
-	cacheKey := cache.CreateListSessionCacheKey(p.Bucket.CID, p.Prefix, p.Delimiter)
+	cacheKey := cache.CreateListSessionCacheKey(p.Bucket.CID, p.Prefix, p.ContinuationToken)
 	session := n.cache.GetListSession(owner, cacheKey)
 	if session != nil {
 		// after reading next object from stream in session
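The one-line change above keys the cached listing session by the continuation token instead of the delimiter, which ties a cached, half-read stream to the specific page that should resume it rather than to any request sharing the same delimiter. A toy sketch of the idea; listSessionKey and its fields are invented for illustration, since the actual key layout produced by cache.CreateListSessionCacheKey is not shown in this diff:

	package main

	import "fmt"

	// listSessionKey is a hypothetical stand-in for the key produced by
	// cache.CreateListSessionCacheKey; only its inputs (container ID, prefix,
	// continuation token) are visible in this diff, not its real layout.
	type listSessionKey struct {
		cid    string // bucket container ID
		prefix string // listing prefix
		token  string // continuation token marking where the previous page stopped
	}

	func createListSessionCacheKey(cid, prefix, token string) listSessionKey {
		return listSessionKey{cid: cid, prefix: prefix, token: token}
	}

	func main() {
		// Two consecutive pages of the same prefix now map to different sessions:
		// only the request that actually continues a stream can find and reuse it.
		first := createListSessionCacheKey("C1", "photos/", "")
		second := createListSessionCacheKey("C1", "photos/", "token-after-first-page")
		fmt.Println(first == second) // false: independent cache entries
	}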
@@ -690,6 +719,20 @@ func (n *layer) getLatestObjectsVersionsV2(ctx context.Context, p allObjectParams)
 		objects = append(objects, obj)
 	}
 
+	//for node := range generator {
+	//	objects = append(objects, &data.ObjectInfo{
+	//		ID:             node.OID,
+	//		IsDir:          false,
+	//		IsDeleteMarker: node.IsDeleteMarker(),
+	//		Name:           node.FilePath,
+	//		Size:           node.Size,
+	//		Created:        time.Time{},
+	//		HashSum:        node.ETag,
+	//		Owner:          user.ID{},
+	//		Headers:        nil,
+	//	})
+	//}
+
 	if err = <-errorCh; err != nil {
 		return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
 	}
@@ -768,7 +811,7 @@ func nodesGeneratorVersions(ctx context.Context, p allObjectParams, nodeVersions
 }
 
 func nodesGeneratorStream(ctx context.Context, p allObjectParams, stream *data.ListSession) (<-chan *data.NodeVersion, <-chan error) {
-	nodeCh := make(chan *data.NodeVersion)
+	nodeCh := make(chan *data.NodeVersion, 1000)
 	errCh := make(chan error, 1)
 	//existed := make(map[string]struct{}, p.MaxKeys) // to squash the same directories
 	existed := stream.NamesMap
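This is where the batching from the commit title shows up in the hunks: nodeCh becomes a buffered channel of 1000 entries, so the goroutine draining the tree-service stream can run up to 1000 node versions ahead of the consumer instead of performing one synchronous handoff per object. A minimal standalone sketch of that producer/consumer pattern; fakeStream and nodeVersion are stand-ins for the real tree stream and *data.NodeVersion, and the real nodesGeneratorStream additionally de-duplicates names via stream.NamesMap and honors MaxKeys:

	package main

	import (
		"context"
		"fmt"
	)

	// Simplified stand-ins for the gateway's types.
	type nodeVersion struct{ FilePath string }

	type fakeStream struct{ i, n int }

	func (s *fakeStream) Next(ctx context.Context) (*nodeVersion, error) {
		if s.i >= s.n {
			return nil, nil // end of stream
		}
		s.i++
		return &nodeVersion{FilePath: fmt.Sprintf("obj-%04d", s.i)}, nil
	}

	// Buffered-generator sketch: the producer batches reads from the stream
	// by running ahead of the consumer, up to the channel's capacity.
	func nodesGeneratorStream(ctx context.Context, stream *fakeStream) (<-chan *nodeVersion, <-chan error) {
		nodeCh := make(chan *nodeVersion, 1000) // batching: buffered instead of unbuffered
		errCh := make(chan error, 1)            // buffered so the producer never blocks on an error

		go func() {
			defer close(nodeCh)
			defer close(errCh)
			for {
				node, err := stream.Next(ctx)
				if err != nil {
					errCh <- err
					return
				}
				if node == nil {
					return
				}
				select {
				case nodeCh <- node:
				case <-ctx.Done():
					return
				}
			}
		}()

		return nodeCh, errCh
	}

	func main() {
		ctx := context.Background()
		nodeCh, errCh := nodesGeneratorStream(ctx, &fakeStream{n: 5})

		var names []string
		for node := range nodeCh {
			names = append(names, node.FilePath)
		}
		// Mirrors the hunk above: check the error channel after draining the nodes.
		if err := <-errCh; err != nil {
			fmt.Println("failed to get next object from stream:", err)
			return
		}
		fmt.Println(names)
	}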