forked from TrueCloudLab/frostfs-http-gw

Compare commits: ca426fff4d ... db31e6f510 (10 commits)

SHA1:
db31e6f510
fc86ab3511
495f745535
8fe8f2dcc2
77eb474581
c8473498ae
a4233b006c
7e80f0cce6
843708a558
77ffde58e9

34 changed files with 1694 additions and 401 deletions
@@ -4,6 +4,10 @@ This document outlines major changes between releases.

## [Unreleased]

### Added
- Support percent-encoding for GET queries (#134)
- Add `trace_id` to logs (#148)

### Changed
- Update go version to 1.22 (#132)
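For context on the percent-encoding entry (#134): query values arrive percent-encoded and have to be decoded before use. A minimal, generic illustration of that round trip with Go's standard `net/url` package (not the gateway's own code):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A query value with characters that must be percent-encoded in a URL.
	raw := "docs/annual report 2024.pdf"

	// What a client sends in a GET query string.
	encoded := url.QueryEscape(raw)
	fmt.Println(encoded) // docs%2Fannual+report+2024.pdf

	// What a server that supports percent-encoding recovers from the query.
	decoded, err := url.QueryUnescape(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded == raw) // true
}
```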
26  SECURITY.md  (new file)

@@ -0,0 +1,26 @@
# Security Policy

## How To Report a Vulnerability

If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.

**Please do not report security vulnerabilities through public issues, discussions, or change requests.**

Instead, you can report it using one of the following ways:

* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email

Please include as much of the information listed below as you can to help us better understand and resolve the issue:

* The type of issue (e.g., buffer overflow, or cross-site scripting)
* Affected version(s)
* Impact of the issue, including how an attacker might exploit the issue
* Step-by-step instructions to reproduce the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Full paths of source file(s) related to the manifestation of the issue
* Any special configuration required to reproduce the issue
* Any log files that are related to this issue (if possible)
* Proof-of-concept or exploit code (if possible)

This information will help us triage your report more quickly.
@@ -3,8 +3,10 @@ package main

import (
	"bytes"
	"context"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/signal"

@@ -17,11 +19,11 @@ import (

	v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"

@@ -40,6 +42,7 @@ import (

	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/spf13/viper"
	"github.com/valyala/fasthttp"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"
)

@@ -90,6 +93,8 @@ type (

		defaultTimestamp    bool
		zipCompression      bool
		clientCut           bool
		returnIndexPage     bool
		indexPageTemplate   string
		bufferMaxSizeForPut uint64
		namespaceHeader     string
		defaultNamespaces   []string

@@ -154,6 +159,7 @@ func newApp(ctx context.Context, opt ...Option) App {

	a.initResolver()
	a.initMetrics()
	a.initTracing(ctx)
	a.loadIndexPageTemplate()

	return a
}

@@ -176,12 +182,59 @@ func (s *appSettings) ZipCompression() bool {

	return s.zipCompression
}

func (s *appSettings) IndexPageEnabled() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.returnIndexPage
}

func (s *appSettings) IndexPageTemplate() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.indexPageTemplate == "" {
		return templates.DefaultIndexTemplate
	}
	return s.indexPageTemplate
}

func (s *appSettings) setZipCompression(val bool) {
	s.mu.Lock()
	s.zipCompression = val
	s.mu.Unlock()
}

func (s *appSettings) setReturnIndexPage(val bool) {
	s.mu.Lock()
	s.returnIndexPage = val
	s.mu.Unlock()
}

func (s *appSettings) setIndexTemplate(val string) {
	s.mu.Lock()
	s.indexPageTemplate = val
	s.mu.Unlock()
}

func (a *app) loadIndexPageTemplate() {
	if !a.settings.IndexPageEnabled() {
		return
	}
	reader, err := os.Open(a.cfg.GetString(cfgIndexPageTemplatePath))
	if err != nil {
		a.settings.setIndexTemplate("")
		a.log.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
		return
	}
	tmpl, err := io.ReadAll(reader)
	if err != nil {
		a.settings.setIndexTemplate("")
		a.log.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
		return
	}
	a.settings.setIndexTemplate(string(tmpl))
	a.log.Info(logs.SetCustomIndexPageTemplate)
}

func (s *appSettings) ClientCut() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()

@@ -400,7 +453,7 @@ func (a *app) setHealthStatus() {

}

func (a *app) Serve() {
	handler := handler.New(a.AppParams(), a.settings, tree.NewTree(services.NewPoolWrapper(a.treePool)))
	handler := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)))

	// Configure router.
	a.configureRouter(handler)

@@ -490,6 +543,7 @@ func (a *app) configReload(ctx context.Context) {

	a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
	a.initTracing(ctx)
	a.loadIndexPageTemplate()
	a.setHealthStatus()

	a.log.Info(logs.SIGHUPConfigReloadCompleted)

@@ -498,6 +552,7 @@ func (a *app) configReload(ctx context.Context) {

func (a *app) updateSettings() {
	a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
	a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
	a.settings.setReturnIndexPage(a.cfg.GetBool(cfgIndexPageEnabled))
	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
	a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
	a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))

@@ -535,15 +590,19 @@ func (a *app) configureRouter(handler *handler.Handler) {

		response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
	}

	r.POST("/upload/{cid}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.Upload))))))
	r.POST("/upload/{cid}", a.tracer(a.logger(a.canonicalizer(a.tokenizer(a.reqNamespace(handler.Upload))))))
	a.log.Info(logs.AddedPathUploadCid)
	r.GET("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAddressOrBucketName))))))
	r.HEAD("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAddressOrBucketName))))))
	r.GET("/get/{cid}/{oid:*}", a.tracer(a.logger(a.canonicalizer(a.tokenizer(a.reqNamespace(handler.DownloadByAddressOrBucketName))))))
	r.OPTIONS("/get/{cid}/{oid:*}", func(ctx *fasthttp.RequestCtx) {
		ctx.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, "*")
		ctx.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, "Authorization")
	})
	r.HEAD("/get/{cid}/{oid:*}", a.tracer(a.logger(a.canonicalizer(a.tokenizer(a.reqNamespace(handler.HeadByAddressOrBucketName))))))
	a.log.Info(logs.AddedPathGetCidOid)
	r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAttribute))))))
	r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAttribute))))))
	r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.tracer(a.logger(a.canonicalizer(a.tokenizer(a.reqNamespace(handler.DownloadByAttribute))))))
	r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.tracer(a.logger(a.canonicalizer(a.tokenizer(a.reqNamespace(handler.HeadByAttribute))))))
	a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
	r.GET("/zip/{cid}/{prefix:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadZipped))))))
	r.GET("/zip/{cid}/{prefix:*}", a.tracer(a.logger(a.canonicalizer(a.tokenizer(a.reqNamespace(handler.DownloadZipped))))))
	a.log.Info(logs.AddedPathZipCidPrefix)

	a.webServer.Handler = r.Handler

@@ -551,11 +610,24 @@ func (a *app) configureRouter(handler *handler.Handler) {

func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		a.log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
		requiredFields := []zap.Field{zap.Uint64("id", req.ID())}
		reqCtx := utils.GetContextFromRequest(req)
		if traceID := trace.SpanFromContext(reqCtx).SpanContext().TraceID(); traceID.IsValid() {
			requiredFields = append(requiredFields, zap.String("trace_id", traceID.String()))
		}
		log := a.log.With(requiredFields...)

		reqCtx = utils.SetReqLog(reqCtx, log)
		utils.SetContextToRequest(reqCtx, req)

		fields := []zap.Field{
			zap.String("remote", req.RemoteAddr().String()),
			zap.ByteString("method", req.Method()),
			zap.ByteString("path", req.Path()),
			zap.ByteString("query", req.QueryArgs().QueryString()),
			zap.Uint64("id", req.ID()))
		}

		log.Info(logs.Request, fields...)
		h(req)
	}
}

@@ -594,9 +666,12 @@ func (a *app) canonicalizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {

func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		appCtx, err := tokens.StoreBearerTokenAppCtx(a.ctx, req)
		reqCtx := utils.GetContextFromRequest(req)
		appCtx, err := tokens.StoreBearerTokenAppCtx(reqCtx, req)
		if err != nil {
			a.log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Uint64("id", req.ID()), zap.Error(err))
			log := utils.GetReqLogOrDefault(reqCtx, a.log)

			log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
			response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

@@ -607,9 +682,7 @@ func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {

func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		appCtx := utils.GetContextFromRequest(req)

		appCtx, span := utils.StartHTTPServerSpan(appCtx, req, "REQUEST")
		appCtx, span := utils.StartHTTPServerSpan(a.ctx, req, "REQUEST")
		defer func() {
			utils.SetHTTPTraceInfo(appCtx, span, req)
			span.End()

@@ -737,6 +810,22 @@ func (a *app) initTracing(ctx context.Context) {

		InstanceID: instanceID,
		Version:    Version,
	}

	if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
		caBytes, err := os.ReadFile(trustedCa)
		if err != nil {
			a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
			return
		}
		certPool := x509.NewCertPool()
		ok := certPool.AppendCertsFromPEM(caBytes)
		if !ok {
			a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
			return
		}
		cfg.ServerCaCertPool = certPool
	}

	updated, err := tracing.Setup(ctx, cfg)
	if err != nil {
		a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
@@ -41,6 +41,8 @@ const (

	defaultConnectTimeout = 10 * time.Second
	defaultStreamTimeout  = 10 * time.Second

	defaultLoggerSamplerInterval = 1 * time.Second

	defaultShutdownTimeout = 15 * time.Second

	defaultPoolErrorThreshold uint32 = 100

@@ -60,6 +62,9 @@ const (

	cfgReconnectInterval = "reconnect_interval"

	cfgIndexPageEnabled      = "index_page.enabled"
	cfgIndexPageTemplatePath = "index_page.template_path"

	// Web.
	cfgWebReadBufferSize  = "web.read_buffer_size"
	cfgWebWriteBufferSize = "web.write_buffer_size"

@@ -75,9 +80,10 @@ const (

	cfgPprofAddress = "pprof.address"

	// Tracing ...
	cfgTracingEnabled  = "tracing.enabled"
	cfgTracingExporter = "tracing.exporter"
	cfgTracingEndpoint = "tracing.endpoint"
	cfgTracingEnabled   = "tracing.enabled"
	cfgTracingExporter  = "tracing.exporter"
	cfgTracingEndpoint  = "tracing.endpoint"
	cfgTracingTrustedCa = "tracing.trusted_ca"

	// Pool config.
	cfgConTimeout = "connect_timeout"

@@ -90,6 +96,11 @@ const (

	cfgLoggerLevel       = "logger.level"
	cfgLoggerDestination = "logger.destination"

	cfgLoggerSamplingEnabled    = "logger.sampling.enabled"
	cfgLoggerSamplingInitial    = "logger.sampling.initial"
	cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
	cfgLoggerSamplingInterval   = "logger.sampling.interval"

	// Wallet.
	cfgWalletPassphrase = "wallet.passphrase"
	cfgWalletPath       = "wallet.path"

@@ -187,10 +198,17 @@ func settings() *viper.Viper {

	// logger:
	v.SetDefault(cfgLoggerLevel, "debug")
	v.SetDefault(cfgLoggerDestination, "stdout")
	v.SetDefault(cfgLoggerSamplingEnabled, false)
	v.SetDefault(cfgLoggerSamplingThereafter, 100)
	v.SetDefault(cfgLoggerSamplingInitial, 100)
	v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)

	// pool:
	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

	v.SetDefault(cfgIndexPageEnabled, false)
	v.SetDefault(cfgIndexPageTemplatePath, "")

	// frostfs:
	v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)

@@ -385,9 +403,9 @@ func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {

	switch dest {
	case destinationStdout:
		return newStdoutLogger(lvl)
		return newStdoutLogger(v, lvl)
	case destinationJournald:
		return newJournaldLogger(lvl)
		return newJournaldLogger(v, lvl)
	default:
		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
	}

@@ -404,39 +422,59 @@ func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {

// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
	c := zap.NewProductionConfig()
	c.Level = zap.NewAtomicLevelAt(lvl)
	c.Encoding = "console"
	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
	stdout := zapcore.AddSync(os.Stderr)
	level := zap.NewAtomicLevelAt(lvl)

	l, err := c.Build(
		zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
	)
	if err != nil {
		panic(fmt.Sprintf("build zap logger instance: %v", err))
	}
	consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
	consoleOutCore = samplingEnabling(v, consoleOutCore)

	return l, c.Level
	l := zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
	return l, level
}

func newJournaldLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
	c := zap.NewProductionConfig()
	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	c.Level = zap.NewAtomicLevelAt(lvl)
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
	level := zap.NewAtomicLevelAt(lvl)

	encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
	encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)

	core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
	core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
	coreWithContext := core.With([]zapcore.Field{
		zapjournald.SyslogFacility(zapjournald.LogDaemon),
		zapjournald.SyslogIdentifier(),
		zapjournald.SyslogPid(),
	})

	coreWithContext = samplingEnabling(v, coreWithContext)

	l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))

	return l, c.Level
	return l, level
}

func newLogEncoder() zapcore.Encoder {
	c := zap.NewProductionEncoderConfig()
	c.EncodeTime = zapcore.ISO8601TimeEncoder

	return zapcore.NewConsoleEncoder(c)
}

func samplingEnabling(v *viper.Viper, core zapcore.Core) zapcore.Core {
	// Zap samples by logging the first cgfLoggerSamplingInitial entries with a given level
	// and message within the specified time interval.
	// In the above config, only the first cgfLoggerSamplingInitial log entries with the same level and message
	// are recorded in cfgLoggerSamplingInterval interval. Every other log entry will be dropped within the interval since
	// cfgLoggerSamplingThereafter is specified here.
	if v.GetBool(cfgLoggerSamplingEnabled) {
		core = zapcore.NewSamplerWithOptions(
			core,
			v.GetDuration(cfgLoggerSamplingInterval),
			v.GetInt(cfgLoggerSamplingInitial),
			v.GetInt(cfgLoggerSamplingThereafter),
		)
	}

	return core
}

func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
@@ -14,8 +14,12 @@ HTTP_GW_PPROF_ADDRESS=localhost:8083

HTTP_GW_PROMETHEUS_ENABLED=true
HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084

# Log level.
# Logger.
HTTP_GW_LOGGER_LEVEL=debug
HTTP_GW_LOGGER_SAMPLING_ENABLED=false
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s

HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false

@@ -99,6 +103,7 @@ HTTP_GW_ZIP_COMPRESSION=false

HTTP_GW_TRACING_ENABLED=true
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
HTTP_GW_TRACING_TRUSTED_CA=""

HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
@@ -13,10 +13,16 @@ tracing:

  enabled: true
  exporter: "otlp_grpc"
  endpoint: "localhost:4317"
  trusted_ca: ""

logger:
  level: debug # Log level.
  destination: stdout
  sampling:
    enabled: false
    initial: 100
    thereafter: 100
    interval: 1s

server:
  - address: 0.0.0.0:8080

@@ -101,6 +107,11 @@ request_timeout: 5s # Timeout to check node health during rebalance.

rebalance_timer: 30s # Interval to check nodes health.
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.

# Enable index page to see objects list for specified container and prefix
index_page:
  enabled: false
  template_path: internal/handler/templates/index.gotmpl

zip:
  compression: false # Enable zip compression to download files by common prefix.

@@ -126,4 +137,4 @@ cache:

resolve_bucket:
  namespace_header: X-Frostfs-Namespace
  default_namespaces: [ "", "root" ]
  default_namespaces: [ "", "root" ]
13  docs/api.md

@@ -95,12 +95,12 @@ The `filename` field from the multipart form will be set as `FileName` attribute

## Get object

Route: `/get/{cid}/{oid}?[download=true]`
Route: `/get/{cid}/{oid}?[download=false]`

| Route parameter | Type   | Description |
|-----------------|--------|-------------|
| `cid`           | Single | Base58 encoded container ID or container name from NNS. |
| `oid`           | Single | Base58 encoded object ID. |
| `cid`           | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
| `oid`           | Single | Base58 encoded `object ID`. Also could be `S3 object name` if `cid` is specified as bucket name. |
| `download`      | Query  | Set the `Content-Disposition` header as `attachment` in response.<br/> This make the browser to download object as file instead of showing it on the page. |

### Methods

@@ -141,6 +141,13 @@ Get an object (payload and attributes) by an address.

| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |

###### Body

Returns object data. If request performed from browser, either displays raw data or downloads it as
attachment if `download` query parameter is set to `true`.
If `index_page.enabled` is set to `true`, returns HTML with index-page if no object with specified
S3-name was found.
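As a usage sketch for this route (not part of the original docs): fetching an object through the gateway with Go's `net/http`, assuming a gateway listening on localhost:8080 and placeholder container/object identifiers.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Placeholder values: substitute a real container ID (or bucket name)
	// and object ID (or S3 object name).
	const gateway = "http://localhost:8080"
	const cid = "BBeVy5EC8sEisCjHLW2mF5jdiprDWWRgaF7WLHGrKy4V"
	const oid = "my-folder/report.pdf"

	// download=true asks the gateway to set Content-Disposition: attachment.
	resp, err := http.Get(gateway + "/get/" + cid + "/" + oid + "?download=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		fmt.Fprintf(os.Stderr, "unexpected status: %s\n", resp.Status)
		return
	}

	// Stream the payload to a local file.
	out, err := os.Create("report.pdf")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```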
#### HEAD

Get an object attributes by an address.
@@ -57,6 +57,7 @@ $ cat http.log

| `frostfs`        | [Frostfs configuration](#frostfs-section)                      |
| `cache`          | [Cache configuration](#cache-section)                          |
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
| `index_page`     | [Index page configuration](#index_page-section)                |


# General section

@@ -75,16 +76,16 @@ pool_error_threshold: 100

reconnect_interval: 1m
```

| Parameter              | Type       | SIGHUP reload | Default value | Description |
|------------------------|------------|---------------|---------------|-------------------------------------------------------------------------------------|
| `rpc_endpoint`         | `string`   | yes           |               | The address of the RPC host to which the gateway connects to resolve bucket names.   |
| `resolve_order`        | `[]string` | yes           | `[nns, dns]`  | Order of bucket name resolvers to use.                                               |
| `connect_timeout`      | `duration` |               | `10s`         | Timeout to connect to a node.                                                         |
| `stream_timeout`       | `duration` |               | `10s`         | Timeout for individual operations in streaming RPC.                                   |
| `request_timeout`      | `duration` |               | `15s`         | Timeout to check node health during rebalance.                                        |
| `rebalance_timer`      | `duration` |               | `60s`         | Interval to check node health.                                                        |
| `pool_error_threshold` | `uint32`   |               | `100`         | The number of errors on connection after which node is considered as unhealthy.      |
| `reconnect_interval`   | `duration` | no            | `1m`          | Listeners reconnection interval.                                                      |

# `wallet` section

@@ -164,12 +165,21 @@ server:

logger:
  level: debug
  destination: stdout
  sampling:
    enabled: false
    initial: 100
    thereafter: 100
    interval: 1s
```

| Parameter     | Type     | SIGHUP reload | Default value | Description |
|---------------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
| `level`       | `string` | yes           | `debug`       | Logging level.<br/>Possible values:  `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
| `destination` | `string` | no            | `stdout`      | Destination for logger: `stdout` or `journald` |
| Parameter             | Type       | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------|
| `level`               | `string`   | yes           | `debug`       | Logging level.<br/>Possible values:  `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
| `destination`         | `string`   | no            | `stdout`      | Destination for logger: `stdout` or `journald` |
| `sampling.enabled`    | `bool`     | no            | false         | Sampling enabling flag. |
| `sampling.initial`    | `int`      | no            | '100'         | Sampling count of first log entries. |
| `sampling.thereafter` | `int`      | no            | '100'         | Sampling count of entries after an `interval`. |
| `sampling.interval`   | `duration` | no            | '1s'          | Sampling interval of messaging similar entries. |
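To make the sampling semantics concrete, here is a small standalone sketch (plain zap/zapcore, values matching the defaults above) of the sampler the gateway's `samplingEnabling` helper wraps around its core when `sampling.enabled` is true: with initial=100, thereafter=100 and interval=1s, the first 100 entries with the same level and message are logged each second, and after that only every 100th within that second.

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewProductionEncoderConfig()
	encCfg.EncodeTime = zapcore.ISO8601TimeEncoder

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(encCfg),
		zapcore.AddSync(os.Stdout),
		zap.NewAtomicLevelAt(zapcore.DebugLevel),
	)

	// Equivalent of logger.sampling with enabled=true, interval=1s,
	// initial=100, thereafter=100.
	sampled := zapcore.NewSamplerWithOptions(core, time.Second, 100, 100)

	log := zap.New(sampled)
	defer func() { _ = log.Sync() }()

	for i := 0; i < 1000; i++ {
		// Identical level+message: if the loop finishes within one second,
		// only the first 100 lines plus every 100th of the rest are emitted.
		log.Info("request")
	}
}
```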

# `web` section
@@ -256,13 +266,15 @@ tracing:

  enabled: true
  exporter: "otlp_grpc"
  endpoint: "localhost:4317"
  trusted_ca: "/etc/ssl/telemetry-trusted-ca.pem"
```

| Parameter  | Type     | SIGHUP reload | Default value | Description |
|------------|----------|---------------|---------------|---------------------------------------------------------------|
| `enabled`  | `bool`   | yes           | `false`       | Flag to enable the tracing. |
| `exporter` | `string` | yes           |               | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint` | `string` | yes           |               | Address of collector endpoint for OTLP exporters. |
| Parameter    | Type     | SIGHUP reload | Default value | Description |
|--------------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------------------------------------|
| `enabled`    | `bool`   | no            | `false`       | Flag to enable the tracing. |
| `exporter`   | `string` | yes           |               | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint`   | `string` | yes           |               | Address of collector endpoint for OTLP exporters. |
| `trusted_ca` | `string` | yes           |               | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server.   |

# `runtime` section
Contains runtime parameters.
@@ -335,4 +347,19 @@ resolve_bucket:

| Parameter            | Type       | SIGHUP reload | Default value         | Description |
|----------------------|------------|---------------|-----------------------|---------------------------------------------------|
| `namespace_header`   | `string`   | yes           | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name.  |
| `default_namespaces` | `[]string` | yes           | ["","root"]           | Namespaces that should be handled as default.     |
| `default_namespaces` | `[]string` | yes           | ["","root"]           | Namespaces that should be handled as default.     |

# `index_page` section

Parameters for index HTML-page output with S3-bucket or S3-subdir content for `Get object` request

```yaml
index_page:
  enabled: false
  template_path: ""
```

| Parameter       | Type     | SIGHUP reload | Default value | Description |
|-----------------|----------|---------------|---------------|----------------------------------------------------------------------------------|
| `enabled`       | `bool`   | yes           | `false`       | Flag to enable index_page return if no object with specified S3-name was found.   |
| `template_path` | `string` | yes           | `""`          | Path to .gotmpl file with html template for index_page.                           |
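A rough sketch of what a custom `.gotmpl` index template involves, using Go's `html/template`. The field names (`Prefix`, `Objects`) are placeholders for illustration only; the data the gateway actually passes to the template is not shown in this diff.

```go
package main

import (
	"html/template"
	"os"
)

// Hypothetical listing model, for illustration only.
type listing struct {
	Prefix  string
	Objects []string
}

const indexTmpl = `<html><body>
<h1>Index of {{.Prefix}}</h1>
<ul>
{{range .Objects}}<li>{{.}}</li>
{{end}}</ul>
</body></html>`

func main() {
	// In the gateway this text would come from index_page.template_path.
	t := template.Must(template.New("index").Parse(indexTmpl))

	data := listing{
		Prefix:  "photos/2024/",
		Objects: []string{"photos/2024/a.jpg", "photos/2024/b.jpg"},
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```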
58  go.mod

@@ -3,11 +3,12 @@ module git.frostfs.info/TrueCloudLab/frostfs-http-gw

go 1.22

require (
	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164
	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240718141740-ce8270568d36
	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98
	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
	github.com/bluele/gcache v0.0.2
	github.com/docker/go-units v0.4.0
	github.com/fasthttp/router v1.4.1
	github.com/nspcc-dev/neo-go v0.106.2
	github.com/prometheus/client_golang v1.19.0

@@ -19,12 +20,12 @@ require (

	github.com/testcontainers/testcontainers-go v0.13.0
	github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
	github.com/valyala/fasthttp v1.34.0
	go.opentelemetry.io/otel v1.16.0
	go.opentelemetry.io/otel/trace v1.16.0
	go.opentelemetry.io/otel v1.28.0
	go.opentelemetry.io/otel/trace v1.28.0
	go.uber.org/zap v1.27.0
	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
	golang.org/x/net v0.23.0
	google.golang.org/grpc v1.62.0
	golang.org/x/net v0.26.0
	google.golang.org/grpc v1.66.2
)

require (

@@ -36,11 +37,12 @@ require (

	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/Microsoft/hcsshim v0.9.2 // indirect
	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
	github.com/andybalholm/brotli v1.0.4 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/containerd/cgroups v1.0.3 // indirect
	github.com/containerd/containerd v1.6.2 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect

@@ -49,22 +51,22 @@ require (

	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/docker v20.10.14+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/mux v1.8.0 // indirect
	github.com/gorilla/websocket v1.5.1 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/klauspost/compress v1.16.4 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/sys/mount v0.3.2 // indirect
	github.com/moby/sys/mountinfo v0.6.1 // indirect

@@ -95,24 +97,22 @@ require (

	github.com/valyala/bytebufferpool v1.0.0 // indirect
	go.etcd.io/bbolt v1.3.9 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
	go.opentelemetry.io/otel/metric v1.16.0 // indirect
	go.opentelemetry.io/otel/sdk v1.16.0 // indirect
	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
	go.opentelemetry.io/otel/metric v1.28.0 // indirect
	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	golang.org/x/crypto v0.21.0 // indirect
	golang.org/x/sync v0.6.0 // indirect
	golang.org/x/sys v0.18.0 // indirect
	golang.org/x/term v0.18.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/crypto v0.24.0 // indirect
	golang.org/x/sync v0.7.0 // indirect
	golang.org/x/sys v0.22.0 // indirect
	golang.org/x/term v0.21.0 // indirect
	golang.org/x/text v0.16.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
	google.golang.org/protobuf v1.33.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
137  go.sum

Checksum entries updated in line with the go.mod changes above: the frostfs-api-go/v2, frostfs-observability and frostfs-sdk-go pseudo-versions; new entries for github.com/VictoriaMetrics/easyproto v0.1.4 and github.com/josharian/intern v1.0.0; bumps for github.com/cenkalti/backoff/v4 v4.3.0, github.com/cespare/xxhash/v2 v2.3.0, github.com/go-logr/logr v1.4.2, github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0, github.com/mailru/easyjson v0.7.7, github.com/rogpeppe/go-internal v1.12.0, the go.opentelemetry.io/otel family at v1.28.0 with go.opentelemetry.io/proto/otlp v1.3.1, and golang.org/x/crypto v0.24.0, golang.org/x/mod v0.17.0, golang.org/x/net v0.26.0; plus removal of several entries that are no longer required (including go.opentelemetry.io/otel/exporters/otlp/internal/retry).
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1127,23 +1119,22 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1213,8 +1204,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f
|
|||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
|
||||
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1290,13 +1281,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
|
||||
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
|
@ -1320,9 +1308,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
|
|||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
|
||||
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
|
||||
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
|
||||
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1336,8 +1323,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@@ -8,6 +8,7 @@ import (

type NodeVersion struct {
	BaseNodeVersion
	DeleteMarker bool
	IsPrefixNode bool
}

// BaseNodeVersion is minimal node info from tree service.
|
@ -1,91 +0,0 @@
|
|||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
|
||||
)
|
||||
|
||||
type GetNodeByPathResponseInfoWrapper struct {
|
||||
response *grpcService.GetNodeByPathResponse_Info
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
|
||||
return n.response.GetNodeId()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
|
||||
return n.response.GetParentId()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
|
||||
return n.response.GetTimestamp()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
|
||||
res := make([]tree.Meta, len(n.response.Meta))
|
||||
for i, value := range n.response.Meta {
|
||||
res[i] = value
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type PoolWrapper struct {
|
||||
p *treepool.Pool
|
||||
}
|
||||
|
||||
func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
|
||||
return &PoolWrapper{p: p}
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
|
||||
poolPrm := treepool.GetNodesParams{
|
||||
CID: prm.CnrID,
|
||||
TreeID: prm.TreeID,
|
||||
Path: prm.Path,
|
||||
Meta: prm.Meta,
|
||||
PathAttribute: tree.FileNameKey,
|
||||
LatestOnly: prm.LatestOnly,
|
||||
AllAttrs: prm.AllAttrs,
|
||||
BearerToken: getBearer(ctx),
|
||||
}
|
||||
|
||||
nodes, err := w.p.GetNodes(ctx, poolPrm)
|
||||
if err != nil {
|
||||
return nil, handleError(err)
|
||||
}
|
||||
|
||||
res := make([]tree.NodeResponse, len(nodes))
|
||||
for i, info := range nodes {
|
||||
res[i] = GetNodeByPathResponseInfoWrapper{info}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func getBearer(ctx context.Context) []byte {
|
||||
token, err := tokens.LoadBearerToken(ctx)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return token.Marshal()
|
||||
}
|
||||
|
||||
func handleError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if errors.Is(err, treepool.ErrNodeNotFound) {
|
||||
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
|
||||
}
|
||||
if errors.Is(err, treepool.ErrNodeAccessDenied) {
|
||||
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
internal/handler/browse.go (Normal file, 159 lines)

@@ -0,0 +1,159 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"html/template"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
dateFormat = "02-01-2006 15:04"
|
||||
attrOID = "OID"
|
||||
attrCreated = "Created"
|
||||
attrFileName = "FileName"
|
||||
attrSize = "Size"
|
||||
)
|
||||
|
||||
type (
|
||||
BrowsePageData struct {
|
||||
BucketName,
|
||||
Prefix string
|
||||
Objects []ResponseObject
|
||||
}
|
||||
ResponseObject struct {
|
||||
OID string
|
||||
Created string
|
||||
FileName string
|
||||
Size string
|
||||
IsDir bool
|
||||
}
|
||||
)
|
||||
|
||||
func parseTimestamp(tstamp string) (time.Time, error) {
|
||||
millis, err := strconv.ParseInt(tstamp, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
return time.UnixMilli(millis), nil
|
||||
}
|
||||
|
||||
func NewResponseObject(nodes map[string]string) ResponseObject {
|
||||
return ResponseObject{
|
||||
OID: nodes[attrOID],
|
||||
Created: nodes[attrCreated],
|
||||
FileName: nodes[attrFileName],
|
||||
Size: nodes[attrSize],
|
||||
IsDir: nodes[attrOID] == "",
|
||||
}
|
||||
}
|
||||
|
||||
func formatTimestamp(strdate string) string {
|
||||
date, err := parseTimestamp(strdate)
|
||||
if err != nil || date.IsZero() {
|
||||
return ""
|
||||
}
|
||||
|
||||
return date.Format(dateFormat)
|
||||
}
|
||||
|
||||
func formatSize(strsize string) string {
|
||||
size, err := strconv.ParseFloat(strsize, 64)
|
||||
if err != nil {
|
||||
return "0B"
|
||||
}
|
||||
return units.HumanSize(size)
|
||||
}
|
||||
|
||||
func parentDir(prefix string) string {
|
||||
index := strings.LastIndex(prefix, "/")
|
||||
if index == -1 {
|
||||
return prefix
|
||||
}
|
||||
return prefix[index:]
|
||||
}
|
||||
|
||||
func trimPrefix(encPrefix string) string {
|
||||
prefix, err := url.PathUnescape(encPrefix)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
slashIndex := strings.LastIndex(prefix, "/")
|
||||
if slashIndex == -1 {
|
||||
return ""
|
||||
}
|
||||
return prefix[:slashIndex]
|
||||
}
|
||||
|
||||
func urlencode(prefix, filename string) string {
|
||||
var res strings.Builder
|
||||
path := filename
|
||||
if prefix != "" {
|
||||
path = strings.Join([]string{prefix, filename}, "/")
|
||||
}
|
||||
prefixParts := strings.Split(path, "/")
|
||||
for _, prefixPart := range prefixParts {
|
||||
prefixPart = "/" + url.PathEscape(prefixPart)
|
||||
if prefixPart == "/." || prefixPart == "/.." {
|
||||
prefixPart = url.PathEscape(prefixPart)
|
||||
}
|
||||
res.WriteString(prefixPart)
|
||||
}
|
||||
|
||||
return res.String()
|
||||
}
|
||||
|
||||
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, bucketInfo *data.BucketInfo, prefix string) {
|
||||
ctx := utils.GetContextFromRequest(c)
|
||||
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
|
||||
log := reqLog.With(zap.String("bucket", bucketInfo.Name))
|
||||
|
||||
nodes, err := h.listObjects(ctx, bucketInfo, prefix)
|
||||
if err != nil {
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
|
||||
respObjects := make([]ResponseObject, len(nodes))
|
||||
|
||||
for i, node := range nodes {
|
||||
respObjects[i] = NewResponseObject(node)
|
||||
}
|
||||
|
||||
sort.Slice(respObjects, func(i, j int) bool {
|
||||
if respObjects[i].IsDir == respObjects[j].IsDir {
|
||||
return respObjects[i].FileName < respObjects[j].FileName
|
||||
}
|
||||
return respObjects[i].IsDir
|
||||
})
|
||||
indexTemplate := h.config.IndexPageTemplate()
|
||||
|
||||
tmpl, err := template.New("index").Funcs(template.FuncMap{
|
||||
"formatTimestamp": formatTimestamp,
|
||||
"formatSize": formatSize,
|
||||
"trimPrefix": trimPrefix,
|
||||
"urlencode": urlencode,
|
||||
"parentDir": parentDir,
|
||||
}).Parse(indexTemplate)
|
||||
if err != nil {
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
if err = tmpl.Execute(c, &BrowsePageData{
|
||||
BucketName: bucketInfo.Name,
|
||||
Prefix: prefix,
|
||||
Objects: respObjects,
|
||||
}); err != nil {
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
}
|
@@ -84,16 +84,17 @@ func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
	scid, _ := c.UserValue("cid").(string)
	prefix, _ := c.UserValue("prefix").(string)

	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log)

	prefix, err := url.QueryUnescape(prefix)
	if err != nil {
		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()), zap.Error(err))
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
		response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	log := h.log.With(zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()))

	ctx := utils.GetContextFromRequest(c)
	log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))

	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
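DownloadZipped now unescapes the prefix taken from the request path, so percent-encoded prefixes (the "Support percent-encoding for GET queries" changelog item) resolve to their literal form before the tree lookup. A standalone illustration of the unescaping step, independent of the gateway code; the example value is made up:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A prefix as it may arrive in the request path, percent-encoded by the client.
	encoded := "photos%2F2024%20summer"

	prefix, err := url.QueryUnescape(encoded)
	if err != nil {
		// The handler answers 400 Bad Request in this case.
		fmt.Println("could not unescape prefix:", err)
		return
	}

	fmt.Println(prefix) // photos/2024 summer
}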
@@ -229,6 +229,10 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (Res
	return &resObjectSearchMock{res: res}, nil
}

func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) {
	return nil, nil
}

func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool {
	for _, attr := range attributes {
		if attr.Key() == filter.Header() {
@@ -269,10 +273,3 @@ func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID o
	}
	return false
}

func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnr)
	addr.SetObject(obj)
	return addr
}
|
|
|
@@ -30,6 +30,8 @@ type Config interface {
	DefaultTimestamp() bool
	ZipCompression() bool
	ClientCut() bool
	IndexPageEnabled() bool
	IndexPageTemplate() string
	BufferMaxSizeForPut() uint64
	NamespaceHeader() string
}
@@ -117,6 +119,14 @@ type PrmObjectSearch struct {
	Filters object.SearchFilters
}

type PrmInitMultiObjectReader struct {
	// payload range
	Off, Ln uint64

	Addr   oid.Address
	Bearer *bearer.Token
}

type ResObjectSearch interface {
	Read(buf []oid.ID) (int, error)
	Iterate(f func(oid.ID) bool) error
@@ -138,6 +148,8 @@ type FrostFS interface {
	RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error)
	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
	SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error)
	InitMultiObjectReader(ctx context.Context, p PrmInitMultiObjectReader) (io.Reader, error)

	utils.EpochInfoFetcher
}
||||
|
@@ -178,13 +190,12 @@ func New(params *AppParams, config Config, tree *tree.Tree) *Handler {

// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
	var (
		idCnr, _ = c.UserValue("cid").(string)
		idObj, _ = c.UserValue("oid").(string)
		log = h.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
	)
	idCnr, _ := c.UserValue("cid").(string)
	idObj, _ := c.UserValue("oid").(string)

	ctx := utils.GetContextFromRequest(c)
	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
	log := reqLog.With(zap.String("cid", idCnr), zap.String("oid", idObj))

	bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
	if err != nil {
@@ -199,52 +210,61 @@ func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, requ
		return
	}

	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(*objID)
	addr := newAddress(bktInfo.CID, *objID)

	f(ctx, *h.newRequest(c, log), addr)
}

// byObjectName is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
	var (
		bucketname = req.UserValue("cid").(string)
		key = req.UserValue("oid").(string)
		log = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
	)
func (h *Handler) byObjectName(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
	bucketname := c.UserValue("cid").(string)
	key := c.UserValue("oid").(string)
	download := c.QueryArgs().GetBool("download")

	ctx := utils.GetContextFromRequest(req)
	ctx := utils.GetContextFromRequest(c)
	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
	log := reqLog.With(zap.String("bucketname", bucketname), zap.String("key", key))

	bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
	unescapedKey, err := url.QueryUnescape(key)
	if err != nil {
		logAndSendBucketError(req, log, err)
		logAndSendBucketError(c, log, err)
		return
	}

	foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, key)
	bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
	if h.config.IndexPageEnabled() && !download && string(c.Method()) != fasthttp.MethodHead {
		if isDir(unescapedKey) || isContainerRoot(unescapedKey) {
			if code := checkErrorType(err); code == fasthttp.StatusNotFound || code == fasthttp.StatusOK {
				c.SetStatusCode(code)
				h.browseObjects(c, bktInfo, unescapedKey)
				return
			}
		}
	}
	if err != nil {
		if errors.Is(err, tree.ErrNodeAccessDenied) {
			response.Error(req, "Access Denied", fasthttp.StatusForbidden)
			return
			response.Error(c, "Access Denied", fasthttp.StatusForbidden)
		} else {
			response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
			log.Error(logs.GetLatestObjectVersion, zap.Error(err))
		}
		log.Error(logs.GetLatestObjectVersion, zap.Error(err))

		response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
		return
	}
	if foundOid.DeleteMarker {
		log.Error(logs.ObjectWasDeleted)
		response.Error(req, "object deleted", fasthttp.StatusNotFound)
		response.Error(c, "object deleted", fasthttp.StatusNotFound)
		return
	}
	addr := newAddress(bktInfo.CID, foundOid.OID)

	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(foundOid.OID)

	f(ctx, *h.newRequest(req, log), addr)
	f(ctx, *h.newRequest(c, log), addr)
}

// byAttribute is a wrapper similar to byAddress.
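The index-page branch in byObjectName only fires when the feature is enabled, the request is not an explicit download, the method is not HEAD, the key looks like a directory (trailing slash) or the container root, and the tree lookup either succeeded or failed with "not found" (access-denied still turns into 403). The predicate below is an illustrative restatement of that condition for readers; shouldBrowse is a hypothetical helper, it does not exist in the codebase.

package main

import (
	"fmt"
	"strings"
)

// shouldBrowse mirrors the branching in byObjectName: should the gateway
// serve the HTML index page instead of an object payload? The real code
// additionally consults checkErrorType on the tree lookup error.
func shouldBrowse(indexEnabled, download bool, method, unescapedKey string) bool {
	if !indexEnabled || download || method == "HEAD" { // fasthttp.MethodHead in the handler
		return false
	}
	isDir := strings.HasSuffix(unescapedKey, "/")
	isContainerRoot := unescapedKey == ""
	return isDir || isContainerRoot
}

func main() {
	fmt.Println(shouldBrowse(true, false, "GET", "photos/"))  // true: directory listing
	fmt.Println(shouldBrowse(true, false, "GET", ""))         // true: container root
	fmt.Println(shouldBrowse(true, true, "GET", "photos/"))   // false: ?download=true keeps object semantics
	fmt.Println(shouldBrowse(true, false, "HEAD", "photos/")) // false: HEAD never browses
	fmt.Println(shouldBrowse(false, false, "GET", "photos/")) // false: feature disabled
}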
|
@@ -253,23 +273,24 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, re
	key, _ := c.UserValue("attr_key").(string)
	val, _ := c.UserValue("attr_val").(string)

	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log)

	key, err := url.QueryUnescape(key)
	if err != nil {
		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Uint64("id", c.ID()), zap.Error(err))
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Error(err))
		response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	val, err = url.QueryUnescape(val)
	if err != nil {
		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Uint64("id", c.ID()), zap.Error(err))
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Error(err))
		response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	log := h.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))

	ctx := utils.GetContextFromRequest(c)
	log = log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))

	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
@@ -373,3 +394,25 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket

	return bktInfo, err
}

func (h *Handler) listObjects(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) ([]map[string]string, error) {
	nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
	if err != nil {
		return nil, err
	}

	var objects = make([]map[string]string, 0, len(nodes))
	for _, node := range nodes {
		meta := node.GetMeta()
		if meta == nil {
			continue
		}
		var obj = make(map[string]string, len(meta))
		for _, m := range meta {
			obj[m.GetKey()] = string(m.GetValue())
		}
		objects = append(objects, obj)
	}

	return objects, nil
}
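listObjects flattens each tree node's meta into an attribute-name-to-value map; NewResponseObject in browse.go then reads the FileName, OID, Size and Created keys from that map, and treats a missing OID as a directory entry. A sketch of the shape of one such map; the attribute names come from the code above, the concrete values are invented.

package main

import "fmt"

func main() {
	// One element of the slice returned by listObjects: the node's meta
	// flattened into attribute-name -> value. Values here are invented.
	node := map[string]string{
		"FileName": "cat.jpg",
		"OID":      "8N3o7Dtr6T1xteCt6eRwhpmJ7JhME58Hyu1dvaswuTDd",
		"Size":     "1048576",
		"Created":  "1718000000000", // Unix milliseconds, as parsed by parseTimestamp
	}

	// A directory node carries no OID, which is what ResponseObject.IsDir keys on.
	dir := map[string]string{"FileName": "2024"}

	fmt.Println(node["FileName"], dir["OID"] == "") // cat.jpg true
}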
|
|
@ -12,6 +12,7 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||
|
@ -37,6 +38,10 @@ func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type configMock struct {
|
||||
}
|
||||
|
||||
|
@ -48,6 +53,17 @@ func (c *configMock) ZipCompression() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (c *configMock) IndexPageEnabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *configMock) IndexPageTemplatePath() string {
|
||||
return ""
|
||||
}
|
||||
func (c *configMock) IndexPageTemplate() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *configMock) ClientCut() bool {
|
||||
return false
|
||||
}
|
||||
|
|
@@ -1,13 +1,17 @@
package handler

import (
	"errors"
	"io"
	"strconv"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"go.uber.org/zap"
)

const attributeMultipartObjectSize = "S3-Multipart-Object-Size"

// MultipartFile provides standard ReadCloser interface and also allows one to
// get file name, it's used for multipart uploads.
type MultipartFile interface {
@@ -45,3 +49,30 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
		return part, nil
	}
}

// getPayload returns initial payload if object is not multipart else composes new reader with parts data.
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
	cid, ok := p.obj.Header.ContainerID()
	if !ok {
		return nil, 0, errors.New("no container id set")
	}
	oid, ok := p.obj.Header.ID()
	if !ok {
		return nil, 0, errors.New("no object id set")
	}
	size, err := strconv.ParseUint(p.strSize, 10, 64)
	if err != nil {
		return nil, 0, err
	}
	ctx := p.req.RequestCtx
	params := PrmInitMultiObjectReader{
		Addr:   newAddress(cid, oid),
		Bearer: bearerToken(ctx),
	}
	payload, err := h.frostfs.InitMultiObjectReader(ctx, params)
	if err != nil {
		return nil, 0, err
	}

	return io.NopCloser(payload), size, nil
}
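getPayload treats an object carrying the S3-Multipart-Object-Size attribute as a "combined" object: its payload is not the file itself but a JSON list of part descriptors, which InitMultiObjectReader decodes into PartInfo values (see internal/service/frostfs/multi_object_reader.go further down). The sketch below shows what such a combined payload could look like; the struct here only mirrors the relevant PartInfo fields, and the object IDs and sizes are invented.

package main

import (
	"encoding/json"
	"fmt"
)

// partInfo mirrors the fields of frostfs.PartInfo that matter for reading:
// the part's object ID and its payload size. The JSON keys follow the
// struct tags declared in multi_object_reader.go.
type partInfo struct {
	OID  string `json:"oid"`
	Size uint64 `json:"size"`
}

func main() {
	// Example combined-object payload (object IDs are invented).
	combined := `[
		{"oid": "6CUt1vnVoYvZ2is6JrLYAk43u5ZTL97a5UbY9bSHDHSJ", "size": 5242880},
		{"oid": "AY3mV2cqGhTDzEGYg8xcKMGdLYBYwXbHnDKRmGxzVrXz", "size": 1310720}
	]`

	var parts []partInfo
	if err := json.Unmarshal([]byte(combined), &parts); err != nil {
		panic(err)
	}

	// The total of the part sizes corresponds to the S3-Multipart-Object-Size
	// attribute that getPayload parses from the object header.
	var total uint64
	for _, p := range parts {
		total += p.Size
	}
	fmt.Println(len(parts), total) // 2 6553600
}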
|
|
@ -47,19 +47,26 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (str
|
|||
return http.DetectContentType(buf), buf, err // to not lose io.EOF
|
||||
}
|
||||
|
||||
func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oid.Address) {
|
||||
type getMultiobjectBodyParams struct {
|
||||
obj *Object
|
||||
req request
|
||||
strSize string
|
||||
}
|
||||
|
||||
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
|
||||
var (
|
||||
err error
|
||||
dis = "inline"
|
||||
start = time.Now()
|
||||
filename string
|
||||
shouldDownload = req.QueryArgs().GetBool("download")
|
||||
start = time.Now()
|
||||
filename string
|
||||
filepath string
|
||||
contentType string
|
||||
)
|
||||
|
||||
prm := PrmObjectGet{
|
||||
PrmAuth: PrmAuth{
|
||||
BearerToken: bearerToken(ctx),
|
||||
},
|
||||
Address: objectAddress,
|
||||
Address: objAddress,
|
||||
}
|
||||
|
||||
rObj, err := h.frostfs.GetObject(ctx, prm)
|
||||
|
@ -69,15 +76,9 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oi
|
|||
}
|
||||
|
||||
// we can't close reader in this function, so how to do it?
|
||||
|
||||
if req.Request.URI().QueryArgs().GetBool("download") {
|
||||
dis = "attachment"
|
||||
}
|
||||
|
||||
req.setIDs(rObj.Header)
|
||||
payload := rObj.Payload
|
||||
payloadSize := rObj.Header.PayloadSize()
|
||||
|
||||
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
|
||||
var contentType string
|
||||
for _, attr := range rObj.Header.Attributes() {
|
||||
key := attr.Key()
|
||||
val := attr.Value()
|
||||
|
@ -92,29 +93,42 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oi
|
|||
case object.AttributeFileName:
|
||||
filename = val
|
||||
case object.AttributeTimestamp:
|
||||
value, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
req.log.Info(logs.CouldntParseCreationDate,
|
||||
zap.String("key", key),
|
||||
if err = req.setTimestamp(val); err != nil {
|
||||
req.log.Error(logs.CouldntParseCreationDate,
|
||||
zap.String("val", val),
|
||||
zap.Error(err))
|
||||
continue
|
||||
}
|
||||
req.Response.Header.Set(fasthttp.HeaderLastModified,
|
||||
time.Unix(value, 0).UTC().Format(http.TimeFormat))
|
||||
case object.AttributeContentType:
|
||||
contentType = val
|
||||
case object.AttributeFilePath:
|
||||
filepath = val
|
||||
case attributeMultipartObjectSize:
|
||||
payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
|
||||
obj: rObj,
|
||||
req: req,
|
||||
strSize: val,
|
||||
})
|
||||
if err != nil {
|
||||
req.handleFrostFSErr(err, start)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
if filename == "" {
|
||||
filename = filepath
|
||||
}
|
||||
|
||||
idsToResponse(&req.Response, &rObj.Header)
|
||||
req.setDisposition(shouldDownload, filename)
|
||||
|
||||
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, "*")
|
||||
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
|
||||
|
||||
if len(contentType) == 0 {
|
||||
// determine the Content-Type from the payload head
|
||||
var payloadHead []byte
|
||||
|
||||
contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
|
||||
return rObj.Payload, nil
|
||||
return payload, nil
|
||||
})
|
||||
if err != nil && err != io.EOF {
|
||||
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
|
||||
|
@ -126,16 +140,46 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oi
|
|||
var headReader io.Reader = bytes.NewReader(payloadHead)
|
||||
|
||||
if err != io.EOF { // otherwise, we've already read full payload
|
||||
headReader = io.MultiReader(headReader, rObj.Payload)
|
||||
headReader = io.MultiReader(headReader, payload)
|
||||
}
|
||||
|
||||
// note: we could do with io.Reader, but SetBodyStream below closes body stream
|
||||
// if it implements io.Closer and that's useful for us.
|
||||
rObj.Payload = readCloser{headReader, rObj.Payload}
|
||||
payload = readCloser{headReader, payload}
|
||||
}
|
||||
req.SetContentType(contentType)
|
||||
|
||||
req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
|
||||
|
||||
req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
|
||||
req.Response.SetBodyStream(payload, int(payloadSize))
|
||||
}
|
||||
|
||||
func (r *request) setIDs(obj object.Object) {
|
||||
objID, _ := obj.ID()
|
||||
cnrID, _ := obj.ContainerID()
|
||||
r.Response.Header.Set(hdrObjectID, objID.String())
|
||||
r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String())
|
||||
r.Response.Header.Set(hdrContainerID, cnrID.String())
|
||||
}
|
||||
|
||||
func (r *request) setDisposition(shouldDownload bool, filename string) {
|
||||
const (
|
||||
inlineDisposition = "inline"
|
||||
attachmentDisposition = "attachment"
|
||||
)
|
||||
|
||||
dis := inlineDisposition
|
||||
if shouldDownload {
|
||||
dis = attachmentDisposition
|
||||
}
|
||||
|
||||
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
|
||||
}
|
||||
|
||||
func (r *request) setTimestamp(timestamp string) error {
|
||||
value, err := strconv.ParseInt(timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Response.Header.Set(fasthttp.HeaderLastModified,
|
||||
time.Unix(value, 0).UTC().Format(http.TimeFormat))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
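request.setTimestamp above converts the object's Timestamp attribute (Unix seconds) into an RFC 1123 Last-Modified header. For reference, a standalone illustration of that conversion; the attribute value is an example, not taken from the change set.

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func main() {
	// Timestamp attribute as stored on the object: Unix time in seconds.
	attr := "1718000000"

	sec, err := strconv.ParseInt(attr, 10, 64)
	if err != nil {
		panic(err)
	}

	// Same formatting as request.setTimestamp: UTC, http.TimeFormat.
	fmt.Println(time.Unix(sec, 0).UTC().Format(http.TimeFormat))
	// Output: Mon, 10 Jun 2024 06:13:20 GMT
}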
@ -43,22 +43,24 @@ func (pr *putResponse) encode(w io.Writer) error {
|
|||
}
|
||||
|
||||
// Upload handles multipart upload request.
|
||||
func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
||||
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
|
||||
var (
|
||||
file MultipartFile
|
||||
idObj oid.ID
|
||||
addr oid.Address
|
||||
scid, _ = req.UserValue("cid").(string)
|
||||
log = h.log.With(zap.String("cid", scid))
|
||||
bodyStream = req.RequestBodyStream()
|
||||
drainBuf = make([]byte, drainBufSize)
|
||||
file MultipartFile
|
||||
idObj oid.ID
|
||||
addr oid.Address
|
||||
)
|
||||
|
||||
ctx := utils.GetContextFromRequest(req)
|
||||
scid, _ := c.UserValue("cid").(string)
|
||||
bodyStream := c.RequestBodyStream()
|
||||
drainBuf := make([]byte, drainBufSize)
|
||||
|
||||
ctx := utils.GetContextFromRequest(c)
|
||||
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
|
||||
log := reqLog.With(zap.String("cid", scid))
|
||||
|
||||
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
||||
if err != nil {
|
||||
logAndSendBucketError(req, log, err)
|
||||
logAndSendBucketError(c, log, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -75,21 +77,23 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
|||
zap.Error(err),
|
||||
)
|
||||
}()
|
||||
boundary := string(req.Request.Header.MultipartFormBoundary())
|
||||
if file, err = fetchMultipartFile(h.log, bodyStream, boundary); err != nil {
|
||||
|
||||
boundary := string(c.Request.Header.MultipartFormBoundary())
|
||||
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
|
||||
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
|
||||
response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
filtered, err := filterHeaders(h.log, &req.Request.Header)
|
||||
|
||||
filtered, err := filterHeaders(log, &c.Request.Header)
|
||||
if err != nil {
|
||||
log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
|
||||
response.Error(req, err.Error(), fasthttp.StatusBadRequest)
|
||||
response.Error(c, err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
|
||||
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
|
||||
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
|
||||
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
|
||||
} else {
|
||||
|
@ -97,9 +101,9 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
|||
}
|
||||
}
|
||||
|
||||
if err = utils.PrepareExpirationHeader(req, h.frostfs, filtered, now); err != nil {
|
||||
if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
|
||||
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
|
||||
response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -143,7 +147,7 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
|||
}
|
||||
|
||||
if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
|
||||
h.handlePutFrostFSErr(req, err)
|
||||
h.handlePutFrostFSErr(c, err, log)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -151,9 +155,9 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
|||
addr.SetContainer(bktInfo.CID)
|
||||
|
||||
// Try to return the response, otherwise, if something went wrong, throw an error.
|
||||
if err = newPutResponse(addr).encode(req); err != nil {
|
||||
if err = newPutResponse(addr).encode(c); err != nil {
|
||||
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
|
||||
response.Error(req, "could not encode response", fasthttp.StatusBadRequest)
|
||||
response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -170,15 +174,15 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
|||
}
|
||||
}
|
||||
// Report status code and content type.
|
||||
req.Response.SetStatusCode(fasthttp.StatusOK)
|
||||
req.Response.Header.SetContentType(jsonHeader)
|
||||
c.Response.SetStatusCode(fasthttp.StatusOK)
|
||||
c.Response.Header.SetContentType(jsonHeader)
|
||||
}
|
||||
|
||||
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
|
||||
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
|
||||
statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
|
||||
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
|
||||
|
||||
h.log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
|
||||
log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
|
||||
response.Error(r, msg, statusCode)
|
||||
}
|
||||
|
||||
|
|
|
@@ -2,14 +2,18 @@ package handler

import (
	"context"
	"errors"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)
@@ -38,6 +42,25 @@ func bearerToken(ctx context.Context) *bearer.Token {
	return nil
}

func isDir(name string) bool {
	return strings.HasSuffix(name, "/")
}

func isContainerRoot(key string) bool {
	return key == ""
}

func checkErrorType(err error) int {
	switch {
	case err == nil:
		return fasthttp.StatusOK
	case errors.Is(err, tree.ErrNodeAccessDenied):
		return fasthttp.StatusForbidden
	default:
		return fasthttp.StatusNotFound
	}
}

func isValidToken(s string) bool {
	for _, c := range s {
		if c <= ' ' || c > 127 {
@@ -69,3 +92,10 @@ func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
	}
	response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}

func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnr)
	addr.SetObject(obj)
	return addr
}
|
|
@@ -31,6 +31,8 @@ const (
	CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
	AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
	FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
	FailedToReadIndexPageTemplate = "failed to read index page template, set default" // Warn in ../../app.go
	SetCustomIndexPageTemplate = "set custom index page template" // Info in ../../app.go
	ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
	MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
	NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
|
|
@@ -33,9 +33,9 @@ func NewFrostFS(p *pool.Pool) *FrostFS {
}

// Container implements frostfs.FrostFS interface method.
func (x *FrostFS) Container(ctx context.Context, layerPrm handler.PrmContainer) (*container.Container, error) {
func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
	prm := pool.PrmContainerGet{
		ContainerID: layerPrm.ContainerID,
		ContainerID: containerPrm.ContainerID,
	}

	res, err := x.pool.GetContainer(ctx, prm)
@@ -60,7 +60,10 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate)
	}

	idObj, err := x.pool.PutObject(ctx, prmPut)
	return idObj, handleObjectError("save object via connection pool", err)
	if err != nil {
		return oid.ID{}, handleObjectError("save object via connection pool", err)
	}
	return idObj.ObjectID, nil
}

// wraps io.ReadCloser and transforms Read errors related to access violation
|
internal/service/frostfs/multi_object_reader.go (Normal file, 241 lines)

@@ -0,0 +1,241 @@
|
|||
package frostfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
)
|
||||
|
||||
// PartInfo is upload information about part.
|
||||
type PartInfo struct {
|
||||
Key string `json:"key"`
|
||||
UploadID string `json:"uploadId"`
|
||||
Number int `json:"number"`
|
||||
OID oid.ID `json:"oid"`
|
||||
Size uint64 `json:"size"`
|
||||
ETag string `json:"etag"`
|
||||
MD5 string `json:"md5"`
|
||||
Created time.Time `json:"created"`
|
||||
}
|
||||
|
||||
type GetFrostFSParams struct {
|
||||
// payload range
|
||||
Off, Ln uint64
|
||||
Addr oid.Address
|
||||
}
|
||||
|
||||
type PartObj struct {
|
||||
OID oid.ID
|
||||
Size uint64
|
||||
}
|
||||
|
||||
type readerInitiator interface {
|
||||
InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
// MultiObjectReader implements io.Reader of payloads of the object list stored in the FrostFS network.
|
||||
type MultiObjectReader struct {
|
||||
ctx context.Context
|
||||
|
||||
layer readerInitiator
|
||||
|
||||
startPartOffset uint64
|
||||
endPartLength uint64
|
||||
|
||||
prm GetFrostFSParams
|
||||
|
||||
curIndex int
|
||||
curReader io.ReadCloser
|
||||
|
||||
parts []PartObj
|
||||
}
|
||||
|
||||
type MultiObjectReaderConfig struct {
|
||||
Initiator readerInitiator
|
||||
|
||||
// the offset of complete object and total size to read
|
||||
Off, Ln uint64
|
||||
|
||||
Addr oid.Address
|
||||
Parts []PartObj
|
||||
}
|
||||
|
||||
var (
|
||||
errOffsetIsOutOfRange = errors.New("offset is out of payload range")
|
||||
errLengthIsOutOfRange = errors.New("length is out of payload range")
|
||||
errEmptyPartsList = errors.New("empty parts list")
|
||||
errorZeroRangeLength = errors.New("zero range length")
|
||||
)
|
||||
|
||||
func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
|
||||
combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
|
||||
PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
|
||||
Address: p.Addr,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get combined object '%s': %w", p.Addr.Object().EncodeToString(), err)
|
||||
}
|
||||
|
||||
var parts []*PartInfo
|
||||
if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
|
||||
}
|
||||
|
||||
objParts := make([]PartObj, len(parts))
|
||||
for i, part := range parts {
|
||||
objParts[i] = PartObj{
|
||||
OID: part.OID,
|
||||
Size: part.Size,
|
||||
}
|
||||
}
|
||||
|
||||
return NewMultiObjectReader(ctx, MultiObjectReaderConfig{
|
||||
Initiator: x,
|
||||
Off: p.Off,
|
||||
Ln: p.Ln,
|
||||
Parts: objParts,
|
||||
Addr: p.Addr,
|
||||
})
|
||||
}
|
||||
|
||||
func NewMultiObjectReader(ctx context.Context, cfg MultiObjectReaderConfig) (*MultiObjectReader, error) {
|
||||
if len(cfg.Parts) == 0 {
|
||||
return nil, errEmptyPartsList
|
||||
}
|
||||
|
||||
r := &MultiObjectReader{
|
||||
ctx: ctx,
|
||||
layer: cfg.Initiator,
|
||||
prm: GetFrostFSParams{
|
||||
Addr: cfg.Addr,
|
||||
},
|
||||
parts: cfg.Parts,
|
||||
}
|
||||
|
||||
if cfg.Off+cfg.Ln == 0 {
|
||||
return r, nil
|
||||
}
|
||||
|
||||
if cfg.Off > 0 && cfg.Ln == 0 {
|
||||
return nil, errorZeroRangeLength
|
||||
}
|
||||
|
||||
startPartIndex, startPartOffset := findStartPart(cfg)
|
||||
if startPartIndex == -1 {
|
||||
return nil, errOffsetIsOutOfRange
|
||||
}
|
||||
r.startPartOffset = startPartOffset
|
||||
|
||||
endPartIndex, endPartLength := findEndPart(cfg)
|
||||
if endPartIndex == -1 {
|
||||
return nil, errLengthIsOutOfRange
|
||||
}
|
||||
r.endPartLength = endPartLength
|
||||
|
||||
r.parts = cfg.Parts[startPartIndex : endPartIndex+1]
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func findStartPart(cfg MultiObjectReaderConfig) (index int, offset uint64) {
|
||||
position := cfg.Off
|
||||
for i, part := range cfg.Parts {
|
||||
// Strict inequality when searching for start position to avoid reading zero length part.
|
||||
if position < part.Size {
|
||||
return i, position
|
||||
}
|
||||
position -= part.Size
|
||||
}
|
||||
|
||||
return -1, 0
|
||||
}
|
||||
|
||||
func findEndPart(cfg MultiObjectReaderConfig) (index int, length uint64) {
|
||||
position := cfg.Off + cfg.Ln
|
||||
for i, part := range cfg.Parts {
|
||||
// Non-strict inequality when searching for end position to avoid out of payload range error.
|
||||
if position <= part.Size {
|
||||
return i, position
|
||||
}
|
||||
position -= part.Size
|
||||
}
|
||||
|
||||
return -1, 0
|
||||
}
|
||||
|
||||
func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
|
||||
if x.curReader != nil {
|
||||
n, err = x.curReader.Read(p)
|
||||
if err != nil {
|
||||
if closeErr := x.curReader.Close(); closeErr != nil {
|
||||
return n, fmt.Errorf("%w (close err: %v)", err, closeErr)
|
||||
}
|
||||
}
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
x.curIndex++
|
||||
}
|
||||
|
||||
if x.curIndex == len(x.parts) {
|
||||
return n, io.EOF
|
||||
}
|
||||
|
||||
x.prm.Addr.SetObject(x.parts[x.curIndex].OID)
|
||||
|
||||
if x.curIndex == 0 {
|
||||
x.prm.Off = x.startPartOffset
|
||||
x.prm.Ln = x.parts[x.curIndex].Size - x.startPartOffset
|
||||
}
|
||||
|
||||
if x.curIndex == len(x.parts)-1 {
|
||||
x.prm.Ln = x.endPartLength - x.prm.Off
|
||||
}
|
||||
|
||||
x.curReader, err = x.layer.InitFrostFSObjectPayloadReader(x.ctx, x.prm)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("init payload reader for the next part: %w", err)
|
||||
}
|
||||
|
||||
x.prm.Off = 0
|
||||
x.prm.Ln = 0
|
||||
|
||||
next, err := x.Read(p[n:])
|
||||
|
||||
return n + next, err
|
||||
}
|
||||
|
||||
// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
|
||||
// Zero range corresponds to full payload (panics if only offset is set).
|
||||
func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
|
||||
var prmAuth handler.PrmAuth
|
||||
|
||||
if p.Off+p.Ln != 0 {
|
||||
prm := handler.PrmObjectRange{
|
||||
PrmAuth: prmAuth,
|
||||
PayloadRange: [2]uint64{p.Off, p.Ln},
|
||||
Address: p.Addr,
|
||||
}
|
||||
|
||||
return x.RangeObject(ctx, prm)
|
||||
}
|
||||
|
||||
prm := handler.PrmObjectGet{
|
||||
PrmAuth: prmAuth,
|
||||
Address: p.Addr,
|
||||
}
|
||||
|
||||
res, err := x.GetObject(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Payload, nil
|
||||
}
|
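NewMultiObjectReader clamps the requested range onto the part list: findStartPart walks the parts until the offset falls strictly inside one, findEndPart does the same non-strictly for offset+length. Below is a small worked example of that arithmetic; the helper functions are a stripped-down copy kept only to illustrate the numbers, and the part sizes mirror the payload lengths used in multi_object_reader_test.go further down.

package main

import "fmt"

// findStart and findEnd restate the range arithmetic of NewMultiObjectReader
// for illustration; they are not part of the gateway code.
func findStart(sizes []uint64, off uint64) (int, uint64) {
	pos := off
	for i, size := range sizes {
		if pos < size { // strict: never start at a zero-length tail
			return i, pos
		}
		pos -= size
	}
	return -1, 0
}

func findEnd(sizes []uint64, off, ln uint64) (int, uint64) {
	pos := off + ln
	for i, size := range sizes {
		if pos <= size { // non-strict: the range may end exactly on a part boundary
			return i, pos
		}
		pos -= size
	}
	return -1, 0
}

func main() {
	sizes := []uint64{12, 13, 12} // three parts, 37 bytes in total
	off, ln := uint64(10), uint64(8)

	si, so := findStart(sizes, off)
	ei, el := findEnd(sizes, off, ln)

	// Range 10..18 starts 10 bytes into part 0 and ends 6 bytes into part 1.
	fmt.Println(si, so, ei, el) // 0 10 1 6
}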
internal/service/frostfs/multi_object_reader_test.go (Normal file, 137 lines)

@@ -0,0 +1,137 @@
package frostfs

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"testing"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/stretchr/testify/require"
)

type readerInitiatorMock struct {
	parts map[oid.ID][]byte
}

func (r *readerInitiatorMock) InitFrostFSObjectPayloadReader(_ context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
	partPayload, ok := r.parts[p.Addr.Object()]
	if !ok {
		return nil, errors.New("part not found")
	}

	if p.Off+p.Ln == 0 {
		return io.NopCloser(bytes.NewReader(partPayload)), nil
	}

	if p.Off > uint64(len(partPayload)-1) {
		return nil, fmt.Errorf("invalid offset: %d/%d", p.Off, len(partPayload))
	}

	if p.Off+p.Ln > uint64(len(partPayload)) {
		return nil, fmt.Errorf("invalid range: %d-%d/%d", p.Off, p.Off+p.Ln, len(partPayload))
	}

	return io.NopCloser(bytes.NewReader(partPayload[p.Off : p.Off+p.Ln])), nil
}

func prepareDataReader() ([]byte, []PartObj, *readerInitiatorMock) {
	mockInitReader := &readerInitiatorMock{
		parts: map[oid.ID][]byte{
			oidtest.ID(): []byte("first part 1"),
			oidtest.ID(): []byte("second part 2"),
			oidtest.ID(): []byte("third part 3"),
		},
	}

	var fullPayload []byte
	parts := make([]PartObj, 0, len(mockInitReader.parts))
	for id, payload := range mockInitReader.parts {
		parts = append(parts, PartObj{OID: id, Size: uint64(len(payload))})
		fullPayload = append(fullPayload, payload...)
	}

	return fullPayload, parts, mockInitReader
}

func TestMultiReader(t *testing.T) {
	ctx := context.Background()

	fullPayload, parts, mockInitReader := prepareDataReader()

	for _, tc := range []struct {
		name string
		off  uint64
		ln   uint64
		err  error
	}{
		{
			name: "simple read all",
		},
		{
			name: "simple read with length",
			ln:   uint64(len(fullPayload)),
		},
		{
			name: "middle of parts",
			off:  parts[0].Size + 2,
			ln:   4,
		},
		{
			name: "first and second",
			off:  parts[0].Size - 4,
			ln:   8,
		},
		{
			name: "first and third",
			off:  parts[0].Size - 4,
			ln:   parts[1].Size + 8,
		},
		{
			name: "second part",
			off:  parts[0].Size,
			ln:   parts[1].Size,
		},
		{
			name: "second and third",
			off:  parts[0].Size,
			ln:   parts[1].Size + parts[2].Size,
		},
		{
			name: "offset out of range",
			off:  uint64(len(fullPayload) + 1),
			ln:   1,
			err:  errOffsetIsOutOfRange,
		},
		{
			name: "zero length",
			off:  parts[1].Size + 1,
			ln:   0,
			err:  errorZeroRangeLength,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			multiReader, err := NewMultiObjectReader(ctx, MultiObjectReaderConfig{
				Initiator: mockInitReader,
				Parts:     parts,
				Off:       tc.off,
				Ln:        tc.ln,
			})
			require.ErrorIs(t, err, tc.err)

			if tc.err == nil {
				off := tc.off
				ln := tc.ln
				if off+ln == 0 {
					ln = uint64(len(fullPayload))
				}
				data, err := io.ReadAll(multiReader)
				require.NoError(t, err)
				require.Equal(t, fullPayload[off:off+ln], data)
			}
		})
	}
}
163 internal/service/frostfs/pool_wrapper.go Normal file
@ -0,0 +1,163 @@
package frostfs

import (
	"context"
	"errors"
	"fmt"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
	grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)

type GetNodeByPathResponseInfoWrapper struct {
	response *grpcService.GetNodeByPathResponse_Info
}

func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
	return []uint64{n.response.GetNodeId()}
}

func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
	return []uint64{n.response.GetParentId()}
}

func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
	return []uint64{n.response.GetTimestamp()}
}

func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
	res := make([]tree.Meta, len(n.response.Meta))
	for i, value := range n.response.Meta {
		res[i] = value
	}
	return res
}

type PoolWrapper struct {
	p *treepool.Pool
}

func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
	return &PoolWrapper{p: p}
}

func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
	poolPrm := treepool.GetNodesParams{
		CID:           prm.CnrID,
		TreeID:        prm.TreeID,
		Path:          prm.Path,
		Meta:          prm.Meta,
		PathAttribute: tree.FileNameKey,
		LatestOnly:    prm.LatestOnly,
		AllAttrs:      prm.AllAttrs,
		BearerToken:   getBearer(ctx),
	}

	nodes, err := w.p.GetNodes(ctx, poolPrm)
	if err != nil {
		return nil, handleError(err)
	}

	res := make([]tree.NodeResponse, len(nodes))
	for i, info := range nodes {
		res[i] = GetNodeByPathResponseInfoWrapper{info}
	}

	return res, nil
}

func getBearer(ctx context.Context) []byte {
	token, err := tokens.LoadBearerToken(ctx)
	if err != nil {
		return nil
	}
	return token.Marshal()
}

func handleError(err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, treepool.ErrNodeNotFound) {
		return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
	}
	if errors.Is(err, treepool.ErrNodeAccessDenied) {
		return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
	}

	return err
}

func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
	order := treepool.NoneOrder
	if sort {
		order = treepool.AscendingOrder
	}
	poolPrm := treepool.GetSubTreeParams{
		CID:         bktInfo.CID,
		TreeID:      treeID,
		RootID:      rootID,
		Depth:       depth,
		BearerToken: getBearer(ctx),
		Order:       order,
	}
	if len(rootID) == 1 && rootID[0] == 0 {
		// storage node interprets 'nil' value as []uint64{0}
		// gate wants to send 'nil' value instead of []uint64{0}, because
		// it provides compatibility with previous tree service api where
		// single uint64(0) value is dropped from signature
		poolPrm.RootID = nil
	}

	subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
	if err != nil {
		return nil, handleError(err)
	}

	var subtree []tree.NodeResponse

	node, err := subTreeReader.Next()
	for err == nil {
		subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
		node, err = subTreeReader.Next()
	}
	if err != io.EOF {
		return nil, handleError(err)
	}

	return subtree, nil
}

type GetSubTreeResponseBodyWrapper struct {
	response *grpcService.GetSubTreeResponse_Body
}

func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
	return n.response.GetNodeId()
}

func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
	resp := n.response.GetParentId()
	if resp == nil {
		// storage sends nil that should be interpreted as []uint64{0}
		// due to protobuf compatibility, see 'GetSubTree' function
		return []uint64{0}
	}
	return resp
}

func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
	return n.response.GetTimestamp()
}

func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
	res := make([]tree.Meta, len(n.response.Meta))
	for i, value := range n.response.Meta {
		res[i] = value
	}
	return res
}
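handleError above wraps the pool-level sentinels with %w, so callers can still match them against the gateway's own tree errors via errors.Is. A minimal sketch of that check (the caller function is hypothetical, not part of the diff; imports are those of pool_wrapper.go):

// Sketch only: shows that the %w-wrapped sentinel returned by handleError
// still matches with errors.Is. Not part of the diff.
func listOrEmpty(ctx context.Context, w *PoolWrapper, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
	nodes, err := w.GetNodes(ctx, prm)
	if errors.Is(err, tree.ErrNodeNotFound) {
		// treepool.ErrNodeNotFound was rewrapped into tree.ErrNodeNotFound by handleError,
		// so "not found" can be treated as an empty listing here.
		return nil, nil
	}

	return nodes, err
}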
90 internal/templates/index.gotmpl Normal file
@ -0,0 +1,90 @@
{{$bucketName := .BucketName}}
{{ $prefix := trimPrefix .Prefix }}
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8"/>
    <title>Index of s3://{{$bucketName}}/{{if $prefix}}{{$prefix}}/{{end}}</title>
    <style>
        table {
            width: 80%;
            border-collapse: collapse;
        }
        body {
            background: #f2f2f2;
        }
        table, th, td {
            border: 0 solid transparent;
        }
        th, td {
            padding: 10px;
            text-align: left;
        }
        th {
            background-color: #c3bcbc;
        }
        tr:nth-child(even) {background-color: #ebe7e7;}
    </style>
</head>
<body>
<h1>Index of s3://{{$bucketName}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
<table>
    <thead>
    <tr>
        <th>Filename</th>
        <th>Size</th>
        <th>Created</th>
        <th>Download</th>
    </tr>
    </thead>
    <tbody>
    {{ $trimmedPrefix := trimPrefix $prefix }}
    {{if $trimmedPrefix }}
    <tr>
        <td>
            ⮐<a href="/get/{{$bucketName}}{{ urlencode $trimmedPrefix "" }}">..</a>
        </td>
        <td></td>
        <td></td>
        <td></td>
    </tr>
    {{else}}
    <tr>
        <td>
            ⮐<a href="/get/{{ $bucketName }}/">..</a>
        </td>
        <td></td>
        <td></td>
        <td></td>
    </tr>
    {{end}}
    {{range .Objects}}
    <tr>
        <td>
            {{if .IsDir}}
            🗀
            <a href="/get/{{ $bucketName }}{{ urlencode $prefix .FileName }}/">
                {{.FileName}}/
            </a>
            {{else}}
            🗎
            <a href="/get/{{ $bucketName }}{{ urlencode $prefix .FileName }}">
                {{.FileName}}
            </a>
            {{end}}
        </td>
        <td>{{if not .IsDir}}{{ formatSize .Size }}{{end}}</td>
        <td>{{if not .IsDir}}{{ formatTimestamp .Created }}{{end}}</td>
        <td>
            {{ if not .IsDir }}
            <a href="/get/{{ $bucketName}}{{ urlencode $prefix .FileName }}?download=true">
                Link
            </a>
            {{ end }}
        </td>
    </tr>
    {{end}}
    </tbody>
</table>
</body>
</html>
6 internal/templates/template.go Normal file
@ -0,0 +1,6 @@
package templates

import _ "embed"

//go:embed index.gotmpl
var DefaultIndexTemplate string
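The embedded template relies on helper functions (trimPrefix, urlencode, formatSize, formatTimestamp) that are registered elsewhere in the gateway. A minimal sketch of how DefaultIndexTemplate could be parsed with such a FuncMap; the helper bodies below are placeholders and assumptions, not the gateway's real implementations:

// Sketch only: the FuncMap bodies are placeholders (assumptions); only
// DefaultIndexTemplate itself comes from this diff.
package indexsketch

import (
	"fmt"
	"html/template"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
)

func parseIndexTemplate() (*template.Template, error) {
	return template.New("index").Funcs(template.FuncMap{
		"trimPrefix":      func(p string) string { return p },                        // placeholder
		"urlencode":       func(prefix, name string) string { return prefix + name }, // placeholder
		"formatSize":      func(v any) string { return fmt.Sprint(v) },               // placeholder
		"formatTimestamp": func(v any) string { return fmt.Sprint(v) },               // placeholder
	}).Parse(templates.DefaultIndexTemplate)
}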
@ -191,8 +191,6 @@ func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
	m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodGetContainerEacl).Set(float64(node.AverageGetContainerEACL().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodSetContainerEacl).Set(float64(node.AverageSetContainerEACL().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
	m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
@ -52,8 +52,8 @@ func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {

// StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
// it in the application context.
func StoreBearerTokenAppCtx(ctx context.Context, req *fasthttp.RequestCtx) (context.Context, error) {
	tkn, err := fetchBearerToken(req)
func StoreBearerTokenAppCtx(ctx context.Context, c *fasthttp.RequestCtx) (context.Context, error) {
	tkn, err := fetchBearerToken(c)
	if err != nil {
		return nil, err
	}
204 tree/tree.go
@ -2,11 +2,13 @@ package tree

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

@ -20,6 +22,7 @@ type (
	// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
	ServiceClient interface {
		GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
		GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
	}

	treeNode struct {

@ -29,6 +32,7 @@ type (
	GetNodesParams struct {
		CnrID      cid.ID
		BktInfo    *data.BucketInfo
		TreeID     string
		Path       []string
		Meta       []string

@ -54,6 +58,7 @@ const (
	// keys for delete marker nodes.
	isDeleteMarkerKV = "IsDeleteMarker"
	sizeKV           = "Size"

	// versionTree -- ID of a tree with object versions.
	versionTree = "version"

@ -73,26 +78,28 @@ type Meta interface {
type NodeResponse interface {
	GetMeta() []Meta
	GetTimestamp() uint64
	GetTimestamp() []uint64
	GetNodeID() []uint64
	GetParentID() []uint64
}

func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
	treeNode := &treeNode{
	tNode := &treeNode{
		Meta: make(map[string]string, len(nodeInfo.GetMeta())),
	}

	for _, kv := range nodeInfo.GetMeta() {
		switch kv.GetKey() {
		case oidKV:
			if err := treeNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
			if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
				return nil, err
			}
		default:
			treeNode.Meta[kv.GetKey()] = string(kv.GetValue())
			tNode.Meta[kv.GetKey()] = string(kv.GetValue())
		}
	}

	return treeNode, nil
	return tNode, nil
}

func (n *treeNode) Get(key string) (string, bool) {
@ -106,29 +113,44 @@ func (n *treeNode) FileName() (string, bool) {
}

func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
	treeNode, err := newTreeNode(node)
	tNode, err := newTreeNode(node)
	if err != nil {
		return nil, fmt.Errorf("invalid tree node: %w", err)
	}

	return newNodeVersionFromTreeNode(treeNode), nil
	return newNodeVersionFromTreeNode(tNode), nil
}

func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
	_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)

	size, _ := treeNode.Get(sizeKV)
	version := &api.NodeVersion{
		BaseNodeVersion: api.BaseNodeVersion{
			OID: treeNode.ObjID,
		},
		DeleteMarker: isDeleteMarker,
		IsPrefixNode: size == "",
	}

	return version
}

func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
	meta := []string{oidKV, isDeleteMarkerKV}
	nodes, err := c.GetVersions(ctx, cnrID, objectName)
	if err != nil {
		return nil, err
	}

	latestNode, err := getLatestVersionNode(nodes)
	if err != nil {
		return nil, err
	}

	return newNodeVersion(latestNode)
}

func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
	meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
	path := pathFromName(objectName)

	p := &GetNodesParams{

@ -139,30 +161,24 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
		LatestOnly: false,
		AllAttrs:   false,
	}
	nodes, err := c.service.GetNodes(ctx, p)
	if err != nil {
		return nil, err
	}

	latestNode, err := getLatestNode(nodes)
	if err != nil {
		return nil, err
	}

	return newNodeVersion(latestNode)
	return c.service.GetNodes(ctx, p)
}

func getLatestNode(nodes []NodeResponse) (NodeResponse, error) {
func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
	var (
		maxCreationTime uint64
		targetIndexNode = -1
	)

	for i, node := range nodes {
		currentCreationTime := node.GetTimestamp()
		if checkExistOID(node.GetMeta()) && currentCreationTime > maxCreationTime {
			maxCreationTime = currentCreationTime
		if !checkExistOID(node.GetMeta()) {
			continue
		}

		if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime {
			targetIndexNode = i
			maxCreationTime = currentCreationTime
		}
	}
@ -187,3 +203,145 @@ func checkExistOID(meta []Meta) bool {
func pathFromName(objectName string) []string {
	return strings.Split(objectName, separator)
}

func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
	rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
	if err != nil {
		return nil, "", err
	}
	subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
	if err != nil {
		if errors.Is(err, layer.ErrNodeNotFound) {
			return nil, "", nil
		}
		return nil, "", err
	}

	nodesMap := make(map[string][]NodeResponse, len(subTree))
	for _, node := range subTree {
		if MultiID(rootID).Equal(node.GetNodeID()) {
			continue
		}

		fileName := GetFilename(node)
		if !strings.HasPrefix(fileName, tailPrefix) {
			continue
		}

		nodes := nodesMap[fileName]

		// Add all nodes if the latestOnly flag is false.
		// Otherwise add all intermediate nodes and only the latest leaf (object) node;
		// to do this, store and replace the last leaf (object) node in nodes[0].
		if len(nodes) == 0 {
			nodes = []NodeResponse{node}
		} else if !latestOnly || isIntermediate(node) {
			nodes = append(nodes, node)
		} else if isIntermediate(nodes[0]) {
			nodes = append([]NodeResponse{node}, nodes...)
		} else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
			nodes[0] = node
		}

		nodesMap[fileName] = nodes
	}

	result := make([]NodeResponse, 0, len(subTree))
	for _, nodes := range nodesMap {
		result = append(result, nodes...)
	}

	return result, strings.TrimSuffix(prefix, tailPrefix), nil
}

func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
	rootID := []uint64{0}
	path := strings.Split(prefix, separator)
	tailPrefix := path[len(path)-1]

	if len(path) > 1 {
		var err error
		rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
		if err != nil {
			return nil, "", err
		}
	}

	return rootID, tailPrefix, nil
}

func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
	p := &GetNodesParams{
		CnrID:      bktInfo.CID,
		BktInfo:    bktInfo,
		TreeID:     treeID,
		Path:       prefixPath,
		LatestOnly: false,
		AllAttrs:   true,
	}
	nodes, err := c.service.GetNodes(ctx, p)
	if err != nil {
		return nil, err
	}

	var intermediateNodes []uint64
	for _, node := range nodes {
		if isIntermediate(node) {
			intermediateNodes = append(intermediateNodes, node.GetNodeID()...)
		}
	}

	if len(intermediateNodes) == 0 {
		return nil, layer.ErrNodeNotFound
	}

	return intermediateNodes, nil
}

func GetFilename(node NodeResponse) string {
	for _, kv := range node.GetMeta() {
		if kv.GetKey() == FileNameKey {
			return string(kv.GetValue())
		}
	}

	return ""
}

func isIntermediate(node NodeResponse) bool {
	if len(node.GetMeta()) != 1 {
		return false
	}

	return node.GetMeta()[0].GetKey() == FileNameKey
}

func getMaxTimestamp(node NodeResponse) uint64 {
	var maxTimestamp uint64

	for _, timestamp := range node.GetTimestamp() {
		if timestamp > maxTimestamp {
			maxTimestamp = timestamp
		}
	}

	return maxTimestamp
}

type MultiID []uint64

func (m MultiID) Equal(id MultiID) bool {
	seen := make(map[uint64]struct{}, len(m))

	for i := range m {
		seen[m[i]] = struct{}{}
	}

	for i := range id {
		if _, ok := seen[id[i]]; !ok {
			return false
		}
	}

	return true
}
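Note that MultiID.Equal is order-insensitive and only checks that every ID of the argument occurs in the receiver, not strict set equality. A tiny sketch illustrating that behaviour (not part of the diff; it uses only the fmt import already present in tree.go):

// Sketch only: MultiID.Equal checks containment of the argument in the receiver.
func multiIDEqualSketch() {
	fmt.Println(MultiID{1, 2, 3}.Equal(MultiID{3, 1})) // true: every ID of {3,1} occurs in {1,2,3}
	fmt.Println(MultiID{1, 2}.Equal(MultiID{1, 4}))    // false: 4 is not in the receiver
}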
@ -21,10 +21,10 @@ func (m nodeMeta) GetValue() []byte {

type nodeResponse struct {
	meta      []nodeMeta
	timestamp uint64
	timestamp []uint64
}

func (n nodeResponse) GetTimestamp() uint64 {
func (n nodeResponse) GetTimestamp() []uint64 {
	return n.timestamp
}

@ -36,6 +36,13 @@ func (n nodeResponse) GetMeta() []Meta {
	return res
}

func (n nodeResponse) GetNodeID() []uint64 {
	return nil
}
func (n nodeResponse) GetParentID() []uint64 {
	return nil
}

func TestGetLatestNode(t *testing.T) {
	for _, tc := range []struct {
		name string

@ -52,7 +59,7 @@ func TestGetLatestNode(t *testing.T) {
			name: "one node of the object version",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 1,
					timestamp: []uint64{1},
					meta: []nodeMeta{
						{
							key: oidKV,

@ -67,11 +74,11 @@ func TestGetLatestNode(t *testing.T) {
			name: "one node of the object version and one node of the secondary object",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 3,
					timestamp: []uint64{3},
					meta:      []nodeMeta{},
				},
				nodeResponse{
					timestamp: 1,
					timestamp: []uint64{1},
					meta: []nodeMeta{
						{
							key: oidKV,

@ -86,11 +93,11 @@ func TestGetLatestNode(t *testing.T) {
			name: "all nodes represent a secondary object",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 3,
					timestamp: []uint64{3},
					meta:      []nodeMeta{},
				},
				nodeResponse{
					timestamp: 5,
					timestamp: []uint64{5},
					meta:      []nodeMeta{},
				},
			},

@ -100,7 +107,7 @@ func TestGetLatestNode(t *testing.T) {
			name: "several nodes of different types and with different timestamp",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 1,
					timestamp: []uint64{1},
					meta: []nodeMeta{
						{
							key: oidKV,

@ -109,11 +116,11 @@ func TestGetLatestNode(t *testing.T) {
					},
				},
				nodeResponse{
					timestamp: 3,
					timestamp: []uint64{3},
					meta:      []nodeMeta{},
				},
				nodeResponse{
					timestamp: 4,
					timestamp: []uint64{4},
					meta: []nodeMeta{
						{
							key: oidKV,

@ -122,7 +129,7 @@ func TestGetLatestNode(t *testing.T) {
					},
				},
				nodeResponse{
					timestamp: 6,
					timestamp: []uint64{6},
					meta:      []nodeMeta{},
				},
			},

@ -130,7 +137,7 @@ func TestGetLatestNode(t *testing.T) {
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			actualNode, err := getLatestNode(tc.nodes)
			actualNode, err := getLatestVersionNode(tc.nodes)
			if tc.error {
				require.Error(t, err)
				return
@ -4,6 +4,7 @@ import (
	"context"

	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

// SetContextToRequest adds a new context to the fasthttp request.

@ -15,3 +16,34 @@ func SetContextToRequest(ctx context.Context, c *fasthttp.RequestCtx) {
func GetContextFromRequest(c *fasthttp.RequestCtx) context.Context {
	return c.UserValue("context").(context.Context)
}

type ctxReqLoggerKeyType struct{}

// SetReqLog sets the child zap.Logger in the context.
func SetReqLog(ctx context.Context, log *zap.Logger) context.Context {
	if ctx == nil {
		return nil
	}
	return context.WithValue(ctx, ctxReqLoggerKeyType{}, log)
}

// GetReqLog returns the request logger if set.
// If the zap.Logger isn't set, it returns nil.
func GetReqLog(ctx context.Context) *zap.Logger {
	if ctx == nil {
		return nil
	} else if r, ok := ctx.Value(ctxReqLoggerKeyType{}).(*zap.Logger); ok {
		return r
	}
	return nil
}

// GetReqLogOrDefault returns the log from the context, if it exists.
// If the log is missing from the context, the default logger is returned.
func GetReqLogOrDefault(ctx context.Context, defaultLog *zap.Logger) *zap.Logger {
	log := GetReqLog(ctx)
	if log == nil {
		log = defaultLog
	}
	return log
}
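SetReqLog and GetReqLogOrDefault carry a request-scoped logger (for example, one enriched with a trace_id field) through the context. A minimal usage sketch; the handler function and the field value are illustrative, not from the diff, while the called helpers are those defined above:

// Sketch only: attaching a request-scoped logger and reading it back later.
func handleWithReqLog(appCtx context.Context, appLog *zap.Logger, c *fasthttp.RequestCtx) {
	reqLog := appLog.With(zap.String("trace_id", "example-trace-id"))
	SetContextToRequest(SetReqLog(appCtx, reqLog), c)

	// Later, in code that only sees the fasthttp context:
	log := GetReqLogOrDefault(GetContextFromRequest(c), appLog)
	log.Info("handling request")
}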
27 web/index.html Normal file
@ -0,0 +1,27 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>HLS DEMO</title>

    <script type="module" src="https://cdn.jsdelivr.net/npm/@vime/core@^5/dist/vime/vime.esm.js"></script>

    <script>
        // document.cookie = "Bearer=CiQSGwoZNer6BLA9gR/BkHhEjaslCQwAOIB61j6S4RoDCJBOIAESZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEvePlLHzzI93xPbdiCwUPZdegyGrAaSOzPrH/sMPU1wpwR1pD5+jW3XnheX1CN1d1AnnJtXSPAbCgt9h2PWp8Og==; path=/";

        var origSend = XMLHttpRequest.prototype.send;
        XMLHttpRequest.prototype.send = function () {
            this.setRequestHeader('Authorization', 'Bearer CiQSGwoZNer6BLA9gR/BkHhEjaslCQwAOIB61j6S4RoDCJBOIAESZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEvePlLHzzI93xPbdiCwUPZdegyGrAaSOzPrH/sMPU1wpwR1pD5+jW3XnheX1CN1d1AnnJtXSPAbCgt9h2PWp8Og==')
            origSend.apply(this, arguments);
        };
    </script>

</head>
<body>
<vm-player controls debug="true">
    <vm-hls version="latest" crossOrigin="use-credentials">
        <source data-src="http://localhost:8082/get/hls/index.m3u8" type="application/x-mpegURL"/>
    </vm-hls>
</vm-player>
</body>
</html>