Compare commits

...

2 commits

Author SHA1 Message Date
876f7b7dcb [#174] Add kludge additional search
Advanced search is needed because some
software may keep the FileName attribute
and ignore the FilePath attribute during
file upload.

Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-12-12 09:28:33 +03:00
71fc6c85d5 [#163] Support JSON bearer token
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-12-11 16:49:50 +03:00
12 changed files with 205 additions and 38 deletions

@@ -110,6 +110,7 @@ type (
corsExposeHeaders []string
corsAllowCredentials bool
corsMaxAge int
additionalSearch bool
}
CORS struct {
@@ -189,6 +190,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
corsMaxAge := fetchCORSMaxAge(v)
additionalSearch := v.GetBool(cfgKludgeAdditionalSearch)
s.mu.Lock()
defer s.mu.Unlock()
@@ -208,6 +210,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
s.corsExposeHeaders = corsExposeHeaders
s.corsAllowCredentials = corsAllowCredentials
s.corsMaxAge = corsMaxAge
s.additionalSearch = additionalSearch
}
func (s *loggerSettings) DroppedLogsInc() {
@@ -305,6 +308,12 @@ func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool)
return ns + ".ns", false
}
func (s *appSettings) AdditionalSearch() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.additionalSearch
}
func (a *app) initResolver() {
var err error
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())

@@ -75,11 +75,13 @@ func TestIntegration(t *testing.T) {
CID, err := createContainer(ctx, t, clientPool, ownerID, version)
require.NoError(t, err, version)
token := makeBearerToken(t, key, ownerID, version)
jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
t.Run("put with json bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, jsonToken) })
t.Run("put with json bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, jsonToken) })
t.Run("put with binary bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, binaryToken) })
t.Run("put with binary bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, binaryToken) })
t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
@@ -526,7 +528,7 @@ func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
return id.ObjectID
}
func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
tkn := new(bearer.Token)
tkn.ForUser(ownerID)
tkn.SetExp(10000)
@@ -540,10 +542,16 @@ func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, versio
err := tkn.Sign(key.PrivateKey)
require.NoError(t, err)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
jsonToken, err := tkn.MarshalJSON()
require.NoError(t, err)
return t64
jsonTokenBase64 = base64.StdEncoding.EncodeToString(jsonToken)
binaryTokenBase64 = base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, jsonTokenBase64)
require.NotEmpty(t, binaryTokenBase64)
return
}
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {

@@ -164,6 +164,9 @@ const (
cfgMultinetFallbackDelay = "multinet.fallback_delay"
cfgMultinetSubnets = "multinet.subnets"
// Kludge.
cfgKludgeAdditionalSearch = "kludge.additional_search"
// Command line args.
cmdHelp = "help"
cmdVersion = "version"

@@ -158,4 +158,7 @@ HTTP_GW_WORKER_POOL_SIZE=1000
# Enable index page support
HTTP_GW_INDEX_PAGE_ENABLED=false
# Index page template path
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
# Enable using additional search by attribute
HTTP_GW_KLUDGE_ADDITIONAL_SEARCH=false

@@ -172,3 +172,7 @@ multinet:
source_ips:
- 1.2.3.4
- 1.2.3.5
kludge:
# Enable using additional search by attribute
additional_search: false

@@ -59,7 +59,7 @@ $ cat http.log
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
| `kludge` | [Kludge configuration](#kludge-section) |
# General section
@@ -457,3 +457,16 @@ multinet:
|--------------|------------|---------------|---------------|----------------------------------------------------------------------|
| `mask` | `string` | yes | | Destination subnet. |
| `source_ips` | `[]string` | yes | | Array of source IP addresses to use when dialing destination subnet. |
# `kludge` section
Workarounds for non-standard use cases.
```yaml
kludge:
additional_search: true
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------------|--------|---------------|---------------| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `kludge.additional_search` | `bool` | yes | `false` | Enable additional search by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, an attempt is made to search for the object by the `FileName` attribute. |
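
The rule in the table can be read as a small predicate. Below is a minimal standalone sketch of it; the `needsFileNameFallback` name and the hard-coded `"FilePath"` literal are illustrative stand-ins, while the gateway's own check is the `needSearchByFileName` helper added further down in this diff.

```go
package main

import (
	"fmt"
	"strings"
)

// needsFileNameFallback mirrors the documented rule: fall back to searching by
// FileName only when the kludge is enabled, the request searched by FilePath,
// and the value contains no '/' or exactly one leading '/'.
func needsFileNameFallback(additionalSearch bool, attrKey, attrVal string) bool {
	if attrKey != "FilePath" || !additionalSearch {
		return false
	}
	return (strings.HasPrefix(attrVal, "/") && strings.Count(attrVal, "/") == 1) ||
		!strings.ContainsRune(attrVal, '/')
}

func main() {
	for _, v := range []string{"cat.png", "/cat.png", "cats/cat.png", "/cats/cat.png"} {
		fmt.Printf("%-14s -> fallback: %v\n", v, needsFileNameFallback(true, "FilePath", v))
	}
	// Output:
	// cat.png        -> fallback: true
	// /cat.png       -> fallback: true
	// cats/cat.png   -> fallback: false
	// /cats/cat.png  -> fallback: false
}
```

In other words, only flat file names (with or without a single leading slash) trigger the second search by `FileName`; directory-like `FilePath` values never do.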

@@ -26,6 +26,7 @@ const (
attrOID = "OID"
attrCreated = "Created"
attrFileName = "FileName"
attrFilePath = "FilePath"
attrSize = "Size"
)

@@ -35,6 +35,7 @@ type Config interface {
IndexPageTemplate() string
BufferMaxSizeForPut() uint64
NamespaceHeader() string
AdditionalSearch() bool
}
// PrmContainer groups parameters of FrostFS.Container operation.
@@ -291,35 +292,58 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, re
return
}
res, err := h.search(ctx, bktInfo.CID, key, val, object.MatchStringEqual)
objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
if errors.Is(err, io.EOF) {
response.Error(c, err.Error(), fasthttp.StatusNotFound)
return
}
response.Error(c, err.Error(), fasthttp.StatusBadRequest)
return
}
var addrObj oid.Address
addrObj.SetContainer(bktInfo.CID)
addrObj.SetObject(objID)
f(ctx, *h.newRequest(c, log), addrObj)
}
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
}
defer res.Close()
buf := make([]oid.ID, 1)
n, err := res.Read(buf)
if n == 0 {
if errors.Is(err, io.EOF) {
switch {
case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
log.Warn(logs.WarnObjectNotFoundByFilePathTrySearchByFileName)
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, attrVal)
case errors.Is(err, io.EOF):
log.Error(logs.ObjectNotFound, zap.Error(err))
response.Error(c, "object not found", fasthttp.StatusNotFound)
return
return oid.ID{}, fmt.Errorf("object not found: %w", err)
default:
log.Error(logs.ReadObjectListFailed, zap.Error(err))
return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
}
log.Error(logs.ReadObjectListFailed, zap.Error(err))
response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
return
}
var addrObj oid.Address
addrObj.SetContainer(bktInfo.CID)
addrObj.SetObject(buf[0])
return buf[0], nil
}
f(ctx, *h.newRequest(c, log), addrObj)
func (h *Handler) needSearchByFileName(key, val string) bool {
if key != attrFilePath || !h.config.AdditionalSearch() {
return false
}
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.ContainsRune(val, '/')
}
// resolveContainer decode container id, if it's not a valid container id

@@ -44,6 +44,7 @@ func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, [
}
type configMock struct {
additionalSearch bool
}
func (c *configMock) DefaultTimestamp() bool {
@@ -78,6 +79,10 @@ func (c *configMock) NamespaceHeader() string {
return ""
}
func (c *configMock) AdditionalSearch() bool {
return c.additionalSearch
}
type handlerContext struct {
key *keys.PrivateKey
owner user.ID
@@ -250,6 +255,66 @@ func TestBasic(t *testing.T) {
require.Equal(t, content, string(data))
})
}
func TestNeedSearchByFileName(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
hc.cfg.additionalSearch = true
for _, tc := range []struct {
name string
attrKey string
attrVal string
additionalSearchDisabled bool
expected bool
}{
{
name: "need search - not contains slash",
attrKey: attrFilePath,
attrVal: "cat.png",
expected: true,
},
{
name: "need search - single lead slash",
attrKey: attrFilePath,
attrVal: "/cat.png",
expected: true,
},
{
name: "don't need search - single slash but not lead",
attrKey: attrFilePath,
attrVal: "cats/cat.png",
expected: false,
},
{
name: "don't need search - more one slash",
attrKey: attrFilePath,
attrVal: "/cats/cat.png",
expected: false,
},
{
name: "don't need search - incorrect attribute key",
attrKey: attrFileName,
attrVal: "cat.png",
expected: false,
},
{
name: "don't need search - additional search disabled",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearchDisabled: true,
expected: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.additionalSearchDisabled {
hc.cfg.additionalSearch = false
}
res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
require.Equal(t, tc.expected, res)
})
}
}
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
r := new(fasthttp.RequestCtx)

@@ -87,4 +87,5 @@ const (
MultinetDialFail = "multinet dial failed"
FailedToLoadMultinetConfig = "failed to load multinet config"
MultinetConfigWontBeUpdated = "multinet config won't be updated"
WarnObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
)

@@ -82,14 +82,22 @@ func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) {
tkn = new(bearer.Token)
)
for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} {
if buf = parse(&ctx.Request.Header); buf == nil {
buf = parse(&ctx.Request.Header)
if buf == nil {
continue
} else if data, err := base64.StdEncoding.DecodeString(string(buf)); err != nil {
}
data, err := base64.StdEncoding.DecodeString(string(buf))
if err != nil {
lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
continue
} else if err = tkn.Unmarshal(data); err != nil {
lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
continue
}
if err = tkn.Unmarshal(data); err != nil {
if err = tkn.UnmarshalJSON(data); err != nil {
lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
continue
}
}
return tkn, nil
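
With this change `fetchBearerToken` base64-decodes the value, tries the binary `Unmarshal` first, and falls back to `UnmarshalJSON`, so clients may send either encoding. The following is a minimal client-side sketch of producing both forms; the frostfs-sdk-go import path is assumed from the gateway's dependencies, and a real token would also carry an owner, an expiration and a signature, as in `makeBearerTokens` above.

```go
package main

import (
	"encoding/base64"
	"fmt"

	// Import path assumed from the gateway's frostfs-sdk-go dependency.
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
)

func main() {
	// Zero-value token, enough to show the two wire encodings; a usable token
	// would also be configured and signed (see makeBearerTokens above).
	tkn := new(bearer.Token)

	// Binary form: protobuf bytes, base64-encoded (accepted before this change).
	binaryToken := base64.StdEncoding.EncodeToString(tkn.Marshal())

	// JSON form: MarshalJSON output, base64-encoded (newly accepted via the
	// UnmarshalJSON fallback in fetchBearerToken).
	jsonBytes, err := tkn.MarshalJSON()
	if err != nil {
		panic(err)
	}
	jsonToken := base64.StdEncoding.EncodeToString(jsonBytes)

	// Either string can be supplied wherever BearerTokenFromHeader or
	// BearerTokenFromCookie read the token from.
	fmt.Println("binary:", binaryToken)
	fmt.Println("json:  ", jsonToken)
}
```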

@@ -98,8 +98,14 @@ func TestFetchBearerToken(t *testing.T) {
tkn := new(bearer.Token)
tkn.ForUser(uid)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
jsonToken, err := tkn.MarshalJSON()
require.NoError(t, err)
jsonTokenBase64 := base64.StdEncoding.EncodeToString(jsonToken)
binaryTokenBase64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, jsonTokenBase64)
require.NotEmpty(t, binaryTokenBase64)
cases := []struct {
name string
@@ -143,25 +149,47 @@
error: "can't unmarshal bearer token",
},
{
name: "bad header, but good cookie",
name: "bad header, but good cookie with binary token",
header: "dGVzdAo=",
cookie: t64,
cookie: binaryTokenBase64,
expect: tkn,
},
{
name: "bad cookie, but good header",
header: t64,
name: "bad cookie, but good header with binary token",
header: binaryTokenBase64,
cookie: "dGVzdAo=",
expect: tkn,
},
{
name: "ok for header",
header: t64,
name: "bad header, but good cookie with json token",
header: "dGVzdAo=",
cookie: jsonTokenBase64,
expect: tkn,
},
{
name: "ok for cookie",
cookie: t64,
name: "bad cookie, but good header with json token",
header: jsonTokenBase64,
cookie: "dGVzdAo=",
expect: tkn,
},
{
name: "ok for header with binary token",
header: binaryTokenBase64,
expect: tkn,
},
{
name: "ok for cookie with binary token",
cookie: binaryTokenBase64,
expect: tkn,
},
{
name: "ok for header with json token",
header: jsonTokenBase64,
expect: tkn,
},
{
name: "ok for cookie with json token",
cookie: jsonTokenBase64,
expect: tkn,
},
}