[#170] Support tar.gz archives #177
15 changed files with 492 additions and 276 deletions
@ -97,7 +97,7 @@ type (
mu sync.RWMutex
defaultTimestamp bool
zipCompression bool
archiveCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string

@ -178,7 +178,7 @@ func (a *app) initAppSettings() {
func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
defaultTimestamp := v.GetBool(cfgUploaderHeaderEnableDefaultTimestamp)
zipCompression := v.GetBool(cfgZipCompression)
archiveCompression := fetchArchiveCompression(v)
returnIndexPage := v.GetBool(cfgIndexPageEnabled)
clientCut := v.GetBool(cfgClientCut)
bufferMaxSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)

@ -197,7 +197,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
defer s.mu.Unlock()

s.defaultTimestamp = defaultTimestamp
s.zipCompression = zipCompression
s.archiveCompression = archiveCompression
s.returnIndexPage = returnIndexPage
s.clientCut = clientCut
s.bufferMaxSizeForPut = bufferMaxSizeForPut

@ -236,10 +236,10 @@ func (s *appSettings) DefaultTimestamp() bool {
return s.defaultTimestamp
}

func (s *appSettings) ZipCompression() bool {
func (s *appSettings) ArchiveCompression() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.zipCompression
return s.archiveCompression
}

func (s *appSettings) IndexPageEnabled() bool {

@ -656,8 +656,10 @@ func (a *app) configureRouter(h *handler.Handler) {
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZipped))
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
a.log.Info(logs.AddedPathZipCidPrefix)

a.webServer.Handler = r.Handler
@ -128,8 +128,13 @@ const (
cfgResolveOrder = "resolve_order"

// Zip compression.
//
// Deprecated: Use cfgArchiveCompression instead.
cfgZipCompression = "zip.compression"

// Archive compression.
cfgArchiveCompression = "archive.compression"

// Runtime.
cfgSoftMemoryLimit = "runtime.soft_memory_limit"

@ -255,9 +260,6 @@ func settings() *viper.Viper {
// upload header
v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)

// zip:
v.SetDefault(cfgZipCompression, false)

// metrics
v.SetDefault(cfgPprofAddress, "localhost:8083")
v.SetDefault(cfgPrometheusAddress, "localhost:8084")

@ -844,3 +846,10 @@ func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {
return attributes, nil
}

func fetchArchiveCompression(v *viper.Viper) bool {
if v.IsSet(cfgZipCompression) {
return v.GetBool(cfgZipCompression)
}
return v.GetBool(cfgArchiveCompression)
}
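For clarity, the deprecation fallback introduced by `fetchArchiveCompression` can be illustrated with a small standalone viper snippet (illustrative only, not part of the change; key names are the ones used above, default handling is simplified): an explicitly set `zip.compression` still wins over `archive.compression`.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("archive.compression", false)
	v.Set("zip.compression", true) // deprecated key explicitly set by an old config

	// Mirrors fetchArchiveCompression: the deprecated key takes precedence when present.
	compression := v.GetBool("archive.compression")
	if v.IsSet("zip.compression") {
		compression = v.GetBool("zip.compression")
	}
	fmt.Println(compression) // true
}
```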
@ -97,9 +97,13 @@ HTTP_GW_REBALANCE_TIMER=30s
# The number of errors on connection after which node is considered as unhealthy
HTTP_GW_POOL_ERROR_THRESHOLD=100

# Enable zip compression to download files by common prefix.
# Enable archive compression to download files by common prefix.
# DEPRECATED: Use HTTP_GW_ARCHIVE_COMPRESSION instead.
HTTP_GW_ZIP_COMPRESSION=false

# Enable archive compression to download files by common prefix.
HTTP_GW_ARCHIVE_COMPRESSION=false

HTTP_GW_TRACING_ENABLED=true
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
@ -116,13 +116,19 @@ pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy
# Number of workers in handler's worker pool
worker_pool_size: 1000

# Enable index page to see objects list for specified container and prefix
# Enables index page to see objects list for specified container and prefix
index_page:
  enabled: false
  template_path: internal/handler/templates/index.gotmpl

# Deprecated: Use archive.compression instead
zip:
  compression: false # Enable zip compression to download files by common prefix.
  # Enables zip compression to download files by common prefix.
  compression: false

archive:
  # Enables archive compression to download files by common prefix.
  compression: false

runtime:
  soft_memory_limit: 1gb
docs/api.md
@ -1,11 +1,11 @@
# HTTP Gateway Specification

| Route | Description |
|-------------------------------------------------|----------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |
| Route | Description |
|-------------------------------------------------|--------------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}` | [Download objects in archive](#download-archive) |

**Note:** `cid` parameter can be base58 encoded container ID or container name
(the name must be registered in NNS, see appropriate section in [nns.md](./nns.md)).
@ -56,12 +56,14 @@ Upload file as object with attributes to FrostFS.

###### Headers

| Header | Description |
|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
| Header | Description |
|------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `X-Explode-Archive` | If set, gate tries to read files from uploading `tar` archive and creates an object for each file in it. Uploading `tar` could be compressed via Gzip by setting a `Content-Encoding` header. Sets a `FilePath` attribute as a relative path from archive root and a `FileName` as the last path element of the `FilePath`. |
| `Content-Encoding` | If set and value is `gzip`, gate will handle uploading file as a `Gzip` compressed `tar` file. |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |

There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority):
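For reference, a minimal client-side sketch of the new explode-upload flow (illustrative only, not part of the change; the gateway address and `<cid>` are placeholders, and per this PR the gate only checks that `X-Explode-Archive` is present, not its value):

```go
package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	// Build a small gzip-compressed tar archive in memory.
	var archive bytes.Buffer
	gzw := gzip.NewWriter(&archive)
	tw := tar.NewWriter(gzw)
	payload := []byte("hello from tar")
	_ = tw.WriteHeader(&tar.Header{Name: "docs/hello.txt", Mode: 0o644, Size: int64(len(payload))})
	_, _ = tw.Write(payload)
	_ = tw.Close()  // finish the tar stream first
	_ = gzw.Close() // then the gzip stream

	// The upload route expects multipart/form-data, so wrap the archive in a form file.
	var body bytes.Buffer
	form := multipart.NewWriter(&body)
	part, _ := form.CreateFormFile("file", "archive.tar.gz")
	_, _ = part.Write(archive.Bytes())
	_ = form.Close()

	// Placeholder gateway address and container ID.
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/upload/<cid>", &body)
	req.Header.Set("Content-Type", form.FormDataContentType())
	req.Header.Set("X-Explode-Archive", "true") // presence is enough; one object is created per file
	req.Header.Set("Content-Encoding", "gzip")  // tells the gate the tar inside the form is gzipped

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```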
@ -269,9 +271,9 @@ If more than one object is found, an arbitrary one will be used to get attribute
| 400 | Some error occurred during operation. |
| 404 | Container or object not found. |

## Download zip
## Download archive

Route: `/zip/{cid}/{prefix}`
Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`

| Route parameter | Type | Description |
|-----------------|-----------|---------------------------------------------------------|

@ -282,12 +284,13 @@ Route: `/zip/{cid}/{prefix}`

#### GET

Find objects by prefix for `FilePath` attributes. Return found objects in zip archive.
Find objects by prefix for `FilePath` attributes. Return found objects in zip or tar archive.
Name of files in archive sets to `FilePath` attribute of objects.
Time of files sets to time when object has started downloading.
You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` route.
You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` or
`/tar/{cid}/` route.

Archive can be compressed (see http-gw [configuration](gate-configuration.md#zip-section)).
Archive can be compressed (see http-gw [configuration](gate-configuration.md#archive-section)).

##### Request
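A matching download sketch for the new route (again illustrative only; gateway address, `<cid>` and `<prefix>` are placeholders). Per this change the `/tar` response is always gzip-framed, using `gzip.NoCompression` when `archive.compression` is off, so the client can unconditionally wrap the body in a gzip reader:

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder gateway address, container ID and FilePath prefix.
	resp, err := http.Get("http://localhost:8080/tar/<cid>/<prefix>")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	gz, err := gzip.NewReader(resp.Body) // the stream is always gzip-framed
	if err != nil {
		panic(err)
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		// hdr.Name carries the object's FilePath attribute; tr reads this entry's payload.
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
```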
@ -218,9 +218,10 @@ upload_header:
|-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
| `use_default_timestamp` | `bool` | yes | `false` | Create timestamp for object if it isn't provided by header. |


# `zip` section

> **_DEPRECATED:_** Use archive section instead

```yaml
zip:
  compression: false

@ -230,6 +231,17 @@ zip:
|---------------|--------|---------------|---------------|--------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable zip compression when download files by common prefix. |

# `archive` section

```yaml
archive:
  compression: false
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------|--------|---------------|---------------|------------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |


# `pprof` section
@ -1,20 +1,21 @@
package handler

import (
"archive/tar"
"archive/zip"
"bufio"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@ -46,7 +47,7 @@ func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
return
}

req := h.newRequest(c, log)
req := newRequest(c, log)

var objID oid.ID
if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {

@ -62,13 +63,6 @@ func shouldDownload(oidParam string, downloadParam bool) bool {
return !isDir(oidParam) || downloadParam
}

func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}

// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
h.byAttribute(c, h.receiveFile)
@ -90,13 +84,61 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
return h.frostfs.SearchObjects(ctx, prm)
}

func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
// DownloadZip handles zip by prefix requests.
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
scid, _ := c.UserValue("cid").(string)

ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
if err != nil {
return
}

c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")

c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
}

func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()

buf := make([]byte, 3<<20)
zipWriter := zip.NewWriter(w)
var objectsWritten int

errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createZipFile(zipWriter, obj)
}),
)
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
return
} else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound)
}
if err := zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err))
}
}
}

func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
method := zip.Store
if h.config.ZipCompression() {
if h.config.ArchiveCompression() {
method = zip.Deflate
}

filePath := getZipFilePath(obj)
filePath := getFilePath(obj)
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
}
@ -108,99 +150,139 @@ func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer,
})
}

// DownloadZipped handles zip by prefix requests.
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
// DownloadTar forms tar.gz from objects by prefix.
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)

ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)

prefix, err := url.QueryUnescape(prefix)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return
}

log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))

bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}

resSearch, err := h.search(ctx, bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return
}

c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.Response.SetStatusCode(http.StatusOK)
c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")

c.SetBodyStreamWriter(func(w *bufio.Writer) {
c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
}

func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()

zipWriter := zip.NewWriter(w)
compressionLevel := gzip.NoCompression
if h.config.ArchiveCompression() {
compressionLevel = gzip.DefaultCompression
}

var bufZip []byte
var addr oid.Address
// ignore error because it's not nil only if compressionLevel argument is invalid
gzipWriter, _ := gzip.NewWriterLevel(w, compressionLevel)
tarWriter := tar.NewWriter(gzipWriter)

empty := true
called := false
btoken := bearerToken(ctx)
addr.SetContainer(bktInfo.CID)

errIter := resSearch.Iterate(func(id oid.ID) bool {
called = true

if empty {
bufZip = make([]byte, 3<<20) // the same as for upload
defer func() {
if err := tarWriter.Close(); err != nil {
log.Error(logs.CloseTarWriter, zap.Error(err))
}
empty = false

addr.SetObject(id)
if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
if err := gzipWriter.Close(); err != nil {
log.Error(logs.CloseGzipWriter, zap.Error(err))
}
}()

return false
})
var objectsWritten int
buf := make([]byte, 3<<20) // the same as for upload

errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createTarFile(tarWriter, obj)
}),
)
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
} else if !called {
log.Error(logs.ObjectsNotFound)
} else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound)
}
}
}

if err = zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err))
}
func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer, error) {
filePath := getFilePath(obj)
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
}

return tw, tw.WriteHeader(&tar.Header{
Name: filePath,
Mode: 0655,
Size: int64(obj.PayloadSize()),
})
}

func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
prm := PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: btoken,
},
Address: addr,
}
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
return func(id oid.ID) bool {
log = log.With(zap.String("oid", id.EncodeToString()))

resGet, err := h.frostfs.GetObject(ctx, prm)
prm := PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: bearerToken(ctx),
},
Address: newAddress(cnrID, id),
}

resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
log.Error(logs.FailedToGetObject, zap.Error(err))
return false
}

fileWriter, err := createArchiveHeader(&resGet.Header)
if err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
return false
}

if err = writeToArchive(resGet, fileWriter, buf); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
return false
}

return false
}
}

func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)

ctx := utils.GetContextFromRequest(c)

prefix, err := url.QueryUnescape(prefix)
if err != nil {
return fmt.Errorf("get FrostFS object: %v", err)
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
}

objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))

resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil {
return fmt.Errorf("zip create header: %v", err)
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
}
return resSearch, nil
}

if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
var err error
if _, err = io.CopyBuffer(objWriter, resGet.Payload, buf); err != nil {
return fmt.Errorf("copy object payload to zip file: %v", err)
}
@ -208,14 +290,10 @@ func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid
return fmt.Errorf("object body close error: %w", err)
}

if err = zipWriter.Flush(); err != nil {
return fmt.Errorf("flush zip writer: %v", err)
}

return nil
}

func getZipFilePath(obj *object.Object) string {
func getFilePath(obj *object.Object) string {
for _, attr := range obj.Attributes() {
if attr.Key() == object.AttributeFilePath {
return attr.Value()
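The essential part of the new tar path is the writer nesting and the close order: the tar writer must be closed before the gzip writer, which is what the deferred closes above guarantee. A stripped-down, self-contained sketch of that pattern (hypothetical in-memory payloads stand in for object payloads; the compression level mirrors the `archive.compression` switch):

```go
package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"io"
	"os"
	"strings"
)

func main() {
	// Hypothetical payloads standing in for FrostFS object payloads.
	files := map[string]string{
		"dir/a.txt": "first object payload",
		"dir/b.txt": "second object payload",
	}

	var out bytes.Buffer
	// NoCompression corresponds to archive.compression=false; DefaultCompression to true.
	gzw, _ := gzip.NewWriterLevel(&out, gzip.NoCompression)
	tw := tar.NewWriter(gzw)

	for name, payload := range files {
		// Write the entry header first (as createTarFile does), then stream the payload.
		if err := tw.WriteHeader(&tar.Header{Name: name, Mode: 0655, Size: int64(len(payload))}); err != nil {
			panic(err)
		}
		if _, err := io.Copy(tw, strings.NewReader(payload)); err != nil {
			panic(err)
		}
	}

	// Close order matters: finish the tar stream before the gzip stream.
	if err := tw.Close(); err != nil {
		panic(err)
	}
	if err := gzw.Close(); err != nil {
		panic(err)
	}

	_ = os.WriteFile("archive.tar.gz", out.Bytes(), 0o644)
}
```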
@ -28,7 +28,7 @@ import (

type Config interface {
DefaultTimestamp() bool
ZipCompression() bool
ArchiveCompression() bool
ClientCut() bool
IndexPageEnabled() bool
IndexPageTemplate() string

@ -216,7 +216,7 @@ func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path
}

addr := newAddress(cnrID, foundOID.OID)
handler(ctx, h.newRequest(c, log), addr)
handler(ctx, newRequest(c, log), addr)
}

// byAttribute is a wrapper similar to byNativeAddress.

@ -265,7 +265,7 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte
addr.SetContainer(bktInfo.CID)
addr.SetObject(objID)

handler(ctx, h.newRequest(c, log), addr)
handler(ctx, newRequest(c, log), addr)
}

func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
@ -517,7 +517,7 @@ func DoFuzzDownloadZipped(input []byte) int {
r.SetUserValue("cid", cid)
r.SetUserValue("prefix", prefix)

hc.Handler().DownloadZipped(r)
hc.Handler().DownloadZip(r)

return fuzzSuccessExitCode
}

@ -66,7 +66,7 @@ func (c *configMock) DefaultTimestamp() bool {
return false
}

func (c *configMock) ZipCompression() bool {
func (c *configMock) ArchiveCompression() bool {
return false
}

@ -250,7 +250,7 @@ func TestBasic(t *testing.T) {

t.Run("zip", func(t *testing.T) {
r = prepareGetZipped(ctx, bktName, "")
hc.Handler().DownloadZipped(r)
hc.Handler().DownloadZip(r)

readerAt := bytes.NewReader(r.Response.Body())
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
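The diff only updates the existing zip subtest; a tar counterpart could look roughly like the fragment below. This is a sketch, not code from the PR: it reuses the helpers visible in this diff (`prepareGetZipped`, `hc.Handler()`, `DownloadTar`) and assumes the suite uses testify's `require` and that the same cid/prefix request setup works for `/tar`.

```go
t.Run("tar", func(t *testing.T) {
	r = prepareGetZipped(ctx, bktName, "") // assumed to fit /tar too, since both routes take cid+prefix
	hc.Handler().DownloadTar(r)

	gzReader, err := gzip.NewReader(bytes.NewReader(r.Response.Body()))
	require.NoError(t, err)
	tarReader := tar.NewReader(gzReader)

	names := make([]string, 0)
	for {
		hdr, err := tarReader.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		require.NoError(t, err)
		names = append(names, hdr.Name) // entry names are the objects' FilePath attributes
	}
	require.NotEmpty(t, names)
})
```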
@ -135,7 +135,7 @@ func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
return
}

req := h.newRequest(c, log)
req := newRequest(c, log)

var objID oid.ID
if checkS3Err == nil {

@ -42,7 +42,9 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))

if err = part.Close(); err != nil {
l.Warn(logs.FailedToCloseReader, zap.Error(err))
}
continue
}
@ -1,13 +1,19 @@
package handler

import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"path/filepath"
"strconv"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"

@ -19,8 +25,9 @@ import (
)

const (
jsonHeader = "application/json; charset=UTF-8"
drainBufSize = 4096
jsonHeader = "application/json; charset=UTF-8"
drainBufSize = 4096
explodeArchiveHeader = "X-Explode-Archive"
)

type putResponse struct {
@ -43,11 +50,7 @@ func (pr *putResponse) encode(w io.Writer) error {

// Upload handles multipart upload request.
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
var (
file MultipartFile
idObj oid.ID
addr oid.Address
)
var file MultipartFile

scid, _ := c.UserValue("cid").(string)
bodyStream := c.RequestBodyStream()

@ -63,20 +66,6 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
return
}

defer func() {
// If the temporary reader can be closed - let's close it.
if file == nil {
return
}
err := file.Close()
log.Debug(
logs.CloseTemporaryMultipartFormFile,
zap.Stringer("address", addr),
zap.String("filename", file.FileName()),
zap.Error(err),
)
}()

boundary := string(c.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
@ -86,53 +75,69 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {

filtered, err := filterHeaders(log, &c.Request.Header)
if err != nil {
log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
log.Error(logs.FailedToFilterHeaders, zap.Error(err))
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
return
}

now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
now = parsed
}
if c.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(request{c, log}, bktInfo, file, filtered)
} else {
h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
}

if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
ResponseError(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
// Multipart is multipart and thus can contain more than one part which
// we ignore at the moment. Also, when dealing with chunked encoding
// the last zero-length chunk might be left unread (because multipart
// reader only cares about its boundary and doesn't look further) and
// it will be (erroneously) interpreted as the start of the next
// pipelined header. Thus, we need to drain the body buffer.
for {
_, err = bodyStream.Read(drainBuf)
if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) {
break
}
}
}

func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
c, log := req.RequestCtx, req.log
setIfNotExist(filtered, object.AttributeFileName, file.FileName())

attributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return
}

attributes := make([]object.Attribute, 0, len(filtered))
// prepares attributes from filtered headers
for key, val := range filtered {
attribute := object.NewAttribute()
attribute.SetKey(key)
attribute.SetValue(val)
attributes = append(attributes, *attribute)
idObj, err := h.uploadObject(c, bkt, attributes, file)
if err != nil {
h.handlePutFrostFSErr(c, err, log)
return
}
// sets FileName attribute if it wasn't set from header
if _, ok := filtered[object.AttributeFileName]; !ok {
filename := object.NewAttribute()
filename.SetKey(object.AttributeFileName)
filename.SetValue(file.FileName())
attributes = append(attributes, *filename)
}
// sets Timestamp attribute if it wasn't set from header and enabled by settings
if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
timestamp := object.NewAttribute()
timestamp.SetKey(object.AttributeTimestamp)
timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
attributes = append(attributes, *timestamp)
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", file.FileName()),
)

addr := newAddress(bkt.CID, idObj)
c.Response.Header.SetContentType(jsonHeader)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
return
}
}

func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
ctx := utils.GetContextFromRequest(c)

obj := object.New()
obj.SetContainerID(bktInfo.CID)
obj.SetContainerID(bkt.CID)
obj.SetOwnerID(*h.ownerID)
obj.SetAttributes(attributes...)
obj.SetAttributes(attrs...)

prm := PrmObjectCreate{
PrmAuth: PrmAuth{
@ -141,40 +146,120 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
Object: obj,
Payload: file,
ClientCut: h.config.ClientCut(),
WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
WithoutHomomorphicHash: bkt.HomomorphicHashDisabled,
BufferMaxSize: h.config.BufferMaxSizeForPut(),
}

if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
h.handlePutFrostFSErr(c, err, log)
return
idObj, err := h.frostfs.CreateObject(ctx, prm)
if err != nil {
return oid.ID{}, err
}

addr.SetObject(idObj)
addr.SetContainer(bktInfo.CID)
return idObj, nil
}

// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)

return
}
// Multipart is multipart and thus can contain more than one part which
// we ignore at the moment. Also, when dealing with chunked encoding
// the last zero-length chunk might be left unread (because multipart
// reader only cares about its boundary and doesn't look further) and
// it will be (erroneously) interpreted as the start of the next
// pipelined header. Thus we need to drain the body buffer.
for {
_, err = bodyStream.Read(drainBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
now = parsed
}
}
// Report status code and content type.
c.Response.SetStatusCode(fasthttp.StatusOK)
c.Response.Header.SetContentType(jsonHeader)
if err := utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
return nil, err
}
attributes := make([]object.Attribute, 0, len(filtered))
// prepares attributes from filtered headers
for key, val := range filtered {
attribute := newAttribute(key, val)
attributes = append(attributes, attribute)
}
// sets Timestamp attribute if it wasn't set from header and enabled by settings
if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
timestamp := newAttribute(object.AttributeTimestamp, strconv.FormatInt(time.Now().Unix(), 10))
attributes = append(attributes, timestamp)
}

return attributes, nil
}

func newAttribute(key string, val string) object.Attribute {
attr := object.NewAttribute()
attr.SetKey(key)
attr.SetValue(val)
return *attr
}

// explodeArchive read files from archive and creates objects for each of them.
// Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
c, log := req.RequestCtx, req.log

// remove user attributes which vary for each file in archive
// to guarantee that they won't appear twice
delete(filtered, object.AttributeFileName)
delete(filtered, object.AttributeFilePath)

commonAttributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return
}
attributes := commonAttributes

reader := file
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
log.Debug(logs.GzipReaderSelected)
gzipReader, err := gzip.NewReader(file)
if err != nil {
log.Error(logs.FailedToCreateGzipReader, zap.Error(err))
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
return
}
defer func() {
if err := gzipReader.Close(); err != nil {
log.Warn(logs.FailedToCloseReader, zap.Error(err))
}
}()
reader = gzipReader
}

tarReader := tar.NewReader(reader)
for {
obj, err := tarReader.Next()
if errors.Is(err, io.EOF) {
break
} else if err != nil {
log.Error(logs.FailedToReadFileFromTar, zap.Error(err))
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
return
}

if isDir(obj.Name) {
continue
}

// set varying attributes
attributes = attributes[:len(commonAttributes)]
fileName := filepath.Base(obj.Name)
attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))

idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
if err != nil {
h.handlePutFrostFSErr(c, err, log)
return
}

log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", fileName),
)
}
}

func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
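As a compact restatement of the per-entry logic in `explodeArchive` above: every regular tar entry becomes one object whose `FilePath` is the full entry name and whose `FileName` is the last path element, while directory entries are skipped. A hypothetical standalone helper (not part of the change; the directory check is simplified compared to the handler's `isDir`) expressing just that mapping:

```go
package main

import (
	"archive/tar"
	"fmt"
	"path/filepath"
	"strings"
)

// entryAttributes mirrors the mapping explodeArchive applies to each tar entry:
// FilePath = full entry name, FileName = its last path element, directories skipped.
func entryAttributes(hdr *tar.Header) (filePath, fileName string, skip bool) {
	if strings.HasSuffix(hdr.Name, "/") {
		return "", "", true // directory entry, no object is created
	}
	return hdr.Name, filepath.Base(hdr.Name), false
}

func main() {
	hdr := &tar.Header{Name: "docs/readme.md"}
	fp, fn, skip := entryAttributes(hdr)
	fmt.Println(fp, fn, skip) // docs/readme.md readme.md false
}
```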
@ -24,6 +24,13 @@ type request struct {
log *zap.Logger
}

func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}

func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),

@ -94,6 +101,13 @@ func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
return addr
}

// setIfNotExist sets key value to map if key is not present yet.
func setIfNotExist(m map[string]string, key, value string) {
if _, ok := m[key]; !ok {
m[key] = value
}
}

func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
@ -1,89 +1,81 @@
package logs

const (
CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
ObjectNotFound = "object not found" // Error in ../../downloader/download.go
ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
FailedToCreateWorkerPool = "failed to create worker pool" // Fatal in ../../app.go
FailedToReadIndexPageTemplate = "failed to read index page template" // Error in ../../app.go
SetCustomIndexPageTemplate = "set custom index page template" // Info in ../../app.go
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
StartingApplication = "starting application" // Info in ../../app.go
StartingServer = "starting server" // Info in ../../app.go
ListenAndServe = "listen and serve" // Fatal in ../../app.go
ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
Request = "request" // Info in ../../app.go
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
FailedToAddServer = "failed to add server" // Warn in ../../app.go
AddServer = "add server" // Info in ../../app.go
NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
UsingCredentials = "using credentials" // Info in ../../settings.go
FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
AddedStoragePeer = "added storage peer" // Info in ../../settings.go
CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
FailedToSumbitTaskToPool = "failed to submit task to pool" // Error in ../handler/browse.go
FailedToHeadObject = "failed to head object" // Error in ../handler/browse.go
FailedToIterateOverResponse = "failed to iterate over search response" // Error in ../handler/browse.go
InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
CouldntParseCreationDate = "couldn't parse creation date"
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
CouldNotReceiveObject = "could not receive object"
ObjectWasDeleted = "object was deleted"
CouldNotSearchForObjects = "could not search for objects"
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
FailedToAddObjectToArchive = "failed to add object to archive"
FailedToGetObject = "failed to get object"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
ObjectsNotFound = "objects not found"
CloseZipWriter = "close zip writer"
ServiceIsRunning = "service is running"
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
ShuttingDownService = "shutting down service"
CantShutDownService = "can't shut down service"
CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
AddAttributeToResultObject = "add attribute to result object"
FailedToCreateResolver = "failed to create resolver"
FailedToCreateWorkerPool = "failed to create worker pool"
FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template"
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
MetricsAreDisabled = "metrics are disabled"
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
StartingApplication = "starting application"
StartingServer = "starting server"
ListenAndServe = "listen and serve"
ShuttingDownWebServer = "shutting down web server"
FailedToShutdownTracing = "failed to shutdown tracing"
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
FailedToReloadConfig = "failed to reload config"
LogLevelWontBeUpdated = "log level won't be updated"
FailedToUpdateResolvers = "failed to update resolvers"
FailedToReloadServerParameters = "failed to reload server parameters"
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
AddedPathUploadCid = "added path /upload/{cid}"
AddedPathGetCidOid = "added path /get/{cid}/{oid}"
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
FailedToAddServer = "failed to add server"
AddServer = "add server"
NoHealthyServers = "no healthy servers"
FailedToInitializeTracing = "failed to initialize tracing"
TracingConfigUpdated = "tracing config updated"
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
UsingCredentials = "using credentials"
FailedToCreateConnectionPool = "failed to create connection pool"
FailedToDialConnectionPool = "failed to dial connection pool"
FailedToCreateTreePool = "failed to create tree pool"
FailedToDialTreePool = "failed to dial tree pool"
AddedStoragePeer = "added storage peer"
CouldntGetBucket = "could not get bucket"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToSumbitTaskToPool = "failed to submit task to pool"
FailedToHeadObject = "failed to head object"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
FailedToUnescapeQuery = "failed to unescape query"
FailedToParseAddressInTreeNode = "failed to parse object addr in tree node"
SettingsNodeInvalidOwnerKey = "settings node: invalid owner key"
SystemNodeHasMultipleIDs = "system node has multiple ids"
FailedToRemoveOldSystemNode = "failed to remove old system node"
BucketSettingsNodeHasMultipleIDs = "bucket settings node has multiple ids"
ServerReconnecting = "reconnecting server..."
ServerReconnectedSuccessfully = "server reconnected successfully"
ServerReconnectFailed = "failed to reconnect server"

@ -94,4 +86,13 @@ const (
MultinetConfigWontBeUpdated = "multinet config won't be updated"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
ObjectUploaded = "object uploaded"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCloseReader = "failed to close reader"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
)