package handler

import (
	"context"
	"html/template"
	"net/url"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/docker/go-units"
	"github.com/panjf2000/ants/v2"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

const (
	// dateFormat is the layout used to render object timestamps on the index page.
	dateFormat = "02-01-2006 15:04"

	// Attribute keys used when collecting object metadata into a map.
	attrOID      = "OID"
	attrCreated  = "Created"
	attrFileName = "FileName"
	attrSize     = "Size"
)

type (
	// BrowsePageData is the model rendered by the index-page template.
	BrowsePageData struct {
		Container string
		Prefix    string
		Protocol  string
		Objects   []ResponseObject
	}

	// ResponseObject is a single row (object or pseudo-directory) on the index page.
	ResponseObject struct {
		OID      string
		Created  string
		FileName string
		FilePath string
		Size     string
		IsDir    bool
	}
)

// newListObjectsResponseS3 builds a ResponseObject from S3 tree-node attributes.
// A node without an OID attribute is treated as a directory marker.
func newListObjectsResponseS3(attrs map[string]string) ResponseObject {
	return ResponseObject{
		Created:  formatTimestamp(attrs[attrCreated]),
		FileName: attrs[attrFileName],
		Size:     attrs[attrSize],
		IsDir:    attrs[attrOID] == "",
	}
}

// newListObjectsResponseNative builds a ResponseObject from native object attributes.
// The display name is the last element of FilePath, falling back to FileName.
func newListObjectsResponseNative(attrs map[string]string) ResponseObject {
	filename := lastPathElement(attrs[object.AttributeFilePath])
	if filename == "" {
		filename = attrs[attrFileName]
	}
	return ResponseObject{
		OID: attrs[attrOID],
		// Timestamp attribute is in seconds; appending "000" converts it to milliseconds.
		Created:  formatTimestamp(attrs[object.AttributeTimestamp] + "000"),
		FileName: filename,
		FilePath: attrs[object.AttributeFilePath],
		Size:     attrs[attrSize],
		IsDir:    false,
	}
}

// getNextDir returns the first directory component of filepath after prefix is
// stripped, or "" if the remainder contains no '/' (i.e. it is a plain object).
func getNextDir(filepath, prefix string) string {
	restPath := strings.Replace(filepath, prefix, "", 1)
	index := strings.Index(restPath, "/")
	if index == -1 {
		return ""
	}
	return restPath[:index]
}

// lastPathElement returns the final component of path, ignoring a single
// trailing slash (so "a/b/" yields "b/").
func lastPathElement(path string) string {
	if path == "" {
		return path
	}
	index := strings.LastIndex(path, "/")
	if index == len(path)-1 {
		index = strings.LastIndex(path[:index], "/")
	}
	return path[index+1:]
}

// parseTimestamp parses a Unix-milliseconds string into a time.Time.
func parseTimestamp(tstamp string) (time.Time, error) {
	millis, err := strconv.ParseInt(tstamp, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.UnixMilli(millis), nil
}

// formatTimestamp renders a Unix-milliseconds string with dateFormat,
// returning "" for unparsable or zero timestamps.
func formatTimestamp(strdate string) string {
	date, err := parseTimestamp(strdate)
	if err != nil || date.IsZero() {
		return ""
	}
	return date.Format(dateFormat)
}

// formatSize renders a byte-count string in human-readable units ("0B" on parse error).
// Registered as the "formatSize" template function.
func formatSize(strsize string) string {
	size, err := strconv.ParseFloat(strsize, 64)
	if err != nil {
		return "0B"
	}
	return units.HumanSize(size)
}

// parentDir returns the tail of prefix starting at its last '/', or prefix
// itself when it contains none. Registered as the "parentDir" template function.
// NOTE(review): despite the name this keeps the LAST path element (with its
// leading '/'), not the parent path — confirm against the index-page template.
func parentDir(prefix string) string {
	index := strings.LastIndex(prefix, "/")
	if index == -1 {
		return prefix
	}
	return prefix[index:]
}

// trimPrefix URL-unescapes encPrefix and cuts it back to the last '/',
// returning "" on decode failure or when no '/' is present.
// Registered as the "trimPrefix" template function.
func trimPrefix(encPrefix string) string {
	prefix, err := url.PathUnescape(encPrefix)
	if err != nil {
		return ""
	}
	slashIndex := strings.LastIndex(prefix, "/")
	if slashIndex == -1 {
		return ""
	}
	return prefix[:slashIndex]
}

// urlencode escapes each path segment and rejoins them with leading slashes.
// "." and ".." segments get their slash escaped too, so they cannot act as
// relative path components in the generated link.
// Registered as the "urlencode" template function.
func urlencode(path string) string {
	var res strings.Builder

	prefixParts := strings.Split(path, "/")
	for _, prefixPart := range prefixParts {
		prefixPart = "/" + url.PathEscape(prefixPart)
		if prefixPart == "/." || prefixPart == "/.." {
			prefixPart = url.PathEscape(prefixPart)
		}
		res.WriteString(prefixPart)
	}

	return res.String()
}

// getDirObjectsS3 lists the direct children of prefix via the S3 tree service
// and converts them to ResponseObjects. Nodes without metadata are skipped.
func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) ([]ResponseObject, error) {
	nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
	if err != nil {
		return nil, err
	}

	var objects = make([]ResponseObject, 0, len(nodes))
	for _, node := range nodes {
		meta := node.GetMeta()
		if meta == nil {
			continue
		}
		var attrs = make(map[string]string, len(meta))
		for _, m := range meta {
			attrs[m.GetKey()] = string(m.GetValue())
		}
		obj := newListObjectsResponseS3(attrs)
		obj.FilePath = prefix + obj.FileName
		objects = append(objects, obj)
	}

	return objects, nil
}

// getDirObjectsNative searches the container for root objects whose FilePath
// starts with prefix, then HEADs them concurrently to build the listing.
func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) ([]ResponseObject, error) {
	// basePath is the directory part of the prefix (up to and including the last '/').
	var basePath string
	if ind := strings.LastIndex(prefix, "/"); ind != -1 {
		basePath = prefix[:ind+1]
	}

	filters := object.NewSearchFilters()
	filters.AddRootFilter()
	if prefix != "" {
		filters.AddFilter(object.AttributeFilePath, prefix, object.MatchCommonPrefix)
	}

	prm := PrmObjectSearch{
		PrmAuth: PrmAuth{
			BearerToken: bearerToken(ctx),
		},
		Container: bucketInfo.CID,
		Filters:   filters,
	}
	objectIDs, err := h.frostfs.SearchObjects(ctx, prm)
	if err != nil {
		return nil, err
	}
	defer objectIDs.Close()

	return h.headDirObjects(ctx, bucketInfo.CID, objectIDs, basePath)
}

// workerParams bundles the shared state passed to each headDirObject worker.
type workerParams struct {
	cnrID     cid.ID
	objectIDs ResObjectSearch
	basePath  string
	errCh     chan error
	objCh     chan ResponseObject
	cancel    context.CancelFunc
}

// headDirObjects HEADs every object id from the search result using a worker
// pool sized to the number of CPUs and collects the results. Errors from
// individual HEADs are logged (via a dedicated drain goroutine) and cancel the
// iteration; results are accumulated by a second collector goroutine.
func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs ResObjectSearch, basePath string) ([]ResponseObject, error) {
	const initialSliceCapacity = 100

	var wg sync.WaitGroup
	var dirs sync.Map
	log := h.log.With(
		zap.String("cid", cnrID.EncodeToString()),
		zap.String("path", basePath),
	)
	done := make(chan struct{})
	objects := make([]ResponseObject, 0, initialSliceCapacity)
	ctx, cancel := context.WithCancel(ctx)
	p := workerParams{
		cnrID:     cnrID,
		objectIDs: objectIDs,
		basePath:  basePath,
		errCh:     make(chan error, 1),
		objCh:     make(chan ResponseObject, 1),
		cancel:    cancel,
	}
	defer cancel()

	// Drain and log worker errors until errCh is closed.
	go func() {
		for err := range p.errCh {
			if err != nil {
				log.Error(logs.FailedToHeadObject, zap.Error(err))
			}
		}
		done <- struct{}{}
	}()

	// Collect worker results until objCh is closed.
	go func() {
		for obj := range p.objCh {
			objects = append(objects, obj)
		}
		done <- struct{}{}
	}()

	pool, err := ants.NewPool(runtime.NumCPU())
	if err != nil {
		return nil, err
	}
	defer pool.Release()

	err = objectIDs.Iterate(func(id oid.ID) bool {
		wg.Add(1)
		if err = pool.Submit(func() {
			defer wg.Done()
			h.headDirObject(ctx, id, &dirs, p)
		}); err != nil {
			// The task was never scheduled, so its deferred wg.Done will not
			// run; release the counter here or wg.Wait below deadlocks.
			wg.Done()
			p.errCh <- err
			// The pool refused work; stop iterating.
			return true
		}
		select {
		case <-ctx.Done():
			return true
		default:
			return false
		}
	})
	// All submitted tasks must finish before the channels are closed: workers
	// send on errCh/objCh, and only the sender may close them safely.
	wg.Wait()
	close(p.errCh)
	close(p.objCh)
	<-done
	<-done

	if err != nil {
		return nil, err
	}

	return objects, nil
}

// headDirObject HEADs a single object and emits either the object itself or,
// when it lives in a subdirectory of basePath, a directory entry — at most
// once per directory name (deduplicated via dirs). A HEAD failure cancels the
// whole listing.
func (h *Handler) headDirObject(ctx context.Context, objID oid.ID, dirs *sync.Map, p workerParams) {
	addr := newAddress(p.cnrID, objID)
	obj, err := h.frostfs.HeadObject(ctx, PrmObjectHead{
		PrmAuth: PrmAuth{BearerToken: bearerToken(ctx)},
		Address: addr,
	})
	if err != nil {
		p.errCh <- err
		p.cancel()
		return
	}

	attrs := loadAttributes(obj.Attributes())
	attrs[attrOID] = objID.EncodeToString()
	attrs[attrSize] = strconv.FormatUint(obj.PayloadSize(), 10)

	dirname := getNextDir(attrs[object.AttributeFilePath], p.basePath)
	if dirname == "" {
		p.objCh <- newListObjectsResponseNative(attrs)
	} else if _, ok := dirs.Load(dirname); !ok {
		p.objCh <- ResponseObject{
			FileName: dirname,
			FilePath: p.basePath + dirname,
			IsDir:    true,
		}
		dirs.Store(dirname, true)
	}
}

// browseParams configures browseObjects: which bucket and prefix to list,
// whether the native (frostfs) or S3 protocol is in use, and the lister to call.
type browseParams struct {
	bucketInfo  *data.BucketInfo
	prefix      string
	isNative    bool
	listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) ([]ResponseObject, error)
}

// browseObjects renders the directory-index page for the given bucket/prefix:
// it lists the objects, sorts them (directories first, then by name), and
// executes the configured index-page template into the response.
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
	const S3Protocol = "s3"
	const FrostfsProtocol = "frostfs"

	ctx := utils.GetContextFromRequest(c)
	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
	log := reqLog.With(
		zap.String("bucket", p.bucketInfo.Name),
		zap.String("container", p.bucketInfo.CID.EncodeToString()),
		zap.String("prefix", p.prefix),
	)

	objects, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	// Directories sort before files; within each group, order by file name.
	sort.Slice(objects, func(i, j int) bool {
		if objects[i].IsDir == objects[j].IsDir {
			return objects[i].FileName < objects[j].FileName
		}
		return objects[i].IsDir
	})

	tmpl, err := template.New("index").Funcs(template.FuncMap{
		"formatSize": formatSize,
		"trimPrefix": trimPrefix,
		"urlencode":  urlencode,
		"parentDir":  parentDir,
	}).Parse(h.config.IndexPageTemplate())
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	bucketName := p.bucketInfo.Name
	protocol := S3Protocol
	if p.isNative {
		bucketName = p.bucketInfo.CID.EncodeToString()
		protocol = FrostfsProtocol
	}
	if err = tmpl.Execute(c, &BrowsePageData{
		Container: bucketName,
		Prefix:    p.prefix,
		Objects:   objects,
		Protocol:  protocol,
	}); err != nil {
		logAndSendBucketError(c, log, err)
		return
	}
}