Compare commits
No commits in common. "master" and "master" have entirely different histories.
28 changed files with 223 additions and 749 deletions
CHANGELOG.md (19 lines changed)

@@ -4,22 +4,6 @@ This document outlines major changes between releases.

## [Unreleased]

## [0.32.1] - 2025-01-17

### Fixed
- Response codes when checking ACL format (#531)
- CORS unmarshal without xmlns (#594)
- Response code for invalid Content-Md5 header (#598)

### Added
- Derive encryption keys for accessbox with salt (#529)
- Debug log when bucket settings not found (#595)
- Context cancellation during tree node streaming (#569)
- Add LimitExceeded error (#589)

### Changed
- Docker image repository (#590, #587)

## [0.32.0] - Khumbu - 2024-12-20

### Added

@@ -399,5 +383,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs

[0.31.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.1...v0.31.2
[0.31.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.2...v0.31.3
[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.3...v0.32.0
[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.0...v0.32.1
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.1...master
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.0...master

VERSION (2 lines changed)

@@ -1 +1 @@
v0.32.1
v0.32.0
@@ -57,7 +57,6 @@ const (
    ErrInvalidCopyDest
    ErrInvalidPolicyDocument
    ErrInvalidObjectState
    ErrMalformedACL
    ErrMalformedXML
    ErrMissingContentLength
    ErrMissingContentMD5

@@ -290,9 +289,6 @@ const (
    //CORS configuration errors.
    ErrCORSUnsupportedMethod
    ErrCORSWildcardExposeHeaders

    // Limits errors.
    ErrLimitExceeded
)

// error code to Error structure, these fields carry respective

@@ -460,12 +456,6 @@ var errorCodes = errorCodeMap{
        Description: "The requested range is not satisfiable",
        HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
    },
    ErrMalformedACL: {
        ErrCode: ErrMalformedACL,
        Code: "MalformedACLError",
        Description: "The ACL that you provided was not well formed or did not validate against our published schema.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrMalformedXML: {
        ErrCode: ErrMalformedXML,
        Code: "MalformedXML",

@@ -1773,14 +1763,6 @@ var errorCodes = errorCodeMap{
        Description: "Content-Range header is mandatory for this type of request",
        HTTPStatusCode: http.StatusBadRequest,
    },
    // The Conflict status is used because this error was made based on the LimitExceeded error
    // from aws iam error https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateUser.html#API_CreateUser_Errors.
    ErrLimitExceeded: {
        ErrCode: ErrLimitExceeded,
        Code: "LimitExceeded",
        Description: "You have reached the quota limit.",
        HTTPStatusCode: http.StatusConflict,
    },
    // Add your error structure here.
}

@@ -1844,10 +1826,6 @@ func TransformToS3Error(err error) error {
        return GetAPIError(ErrBucketAlreadyExists)
    }

    if errors.Is(err, frostfs.ErrQuotaLimitReached) {
        return GetAPIError(ErrLimitExceeded)
    }

    return GetAPIError(ErrInternalError)
}
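To make the quota mapping concrete, here is a minimal, self-contained sketch of the idea behind the TransformToS3Error hunk above. The sentinel error, the struct and the function names below are illustrative stand-ins, not the gateway's actual identifiers.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Illustrative stand-ins for the gateway's frostfs sentinel error and its S3 error table.
var errQuotaLimitReached = errors.New("quota limit reached")

type s3Error struct {
	Code           string
	HTTPStatusCode int
}

// transformToS3Error mirrors the mapping shown in the hunk above: a wrapped
// quota error becomes the S3 LimitExceeded response (409 Conflict), anything
// unrecognized falls back to InternalError.
func transformToS3Error(err error) s3Error {
	if errors.Is(err, errQuotaLimitReached) {
		return s3Error{Code: "LimitExceeded", HTTPStatusCode: http.StatusConflict}
	}
	return s3Error{Code: "InternalError", HTTPStatusCode: http.StatusInternalServerError}
}

func main() {
	// Storage layers typically wrap the sentinel with %w, so errors.Is still matches.
	err := fmt.Errorf("put object: %w: container quota", errQuotaLimitReached)
	fmt.Println(transformToS3Error(err)) // {LimitExceeded 409}
}
```

The 409 Conflict status follows the comment in the error table above, which models the response on the AWS IAM LimitExceeded error.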
@@ -7,7 +7,6 @@ import (
    "encoding/xml"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"

@@ -298,17 +297,10 @@ type createBucketInfo struct {
    Key *keys.PrivateKey
}

type bucketPrm struct {
    bktName string
    query url.Values
    box *accessbox.Box
    createParams createBucketParams
}

func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
    box, key := createAccessBox(hc.t)

    w := createBucketBase(hc, bucketPrm{bktName: bktName, box: box})
    w := createBucketBase(hc, bktName, box)
    assertStatus(hc.t, w, http.StatusOK)

    bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)

@@ -322,32 +314,13 @@ func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
}

func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code apierr.ErrorCode) {
    w := createBucketBase(hc, bucketPrm{bktName: bktName, box: box})
    w := createBucketBase(hc, bktName, box)
    assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func createBucketWithConstraint(hc *handlerContext, bktName, constraint string) *createBucketInfo {
    box, key := createAccessBox(hc.t)
    var prm createBucketParams
    if constraint != "" {
        prm.LocationConstraint = constraint
    }
    w := createBucketBase(hc, bucketPrm{bktName: bktName, box: box, createParams: prm})
    assertStatus(hc.t, w, http.StatusOK)

    bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
    require.NoError(hc.t, err)

    return &createBucketInfo{
        BktInfo: bktInfo,
        Box: box,
        Key: key,
    }
}

func createBucketBase(hc *handlerContext, prm bucketPrm) *httptest.ResponseRecorder {
    w, r := prepareTestFullRequest(hc, prm.bktName, "", nil, prm.createParams)
    ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: prm.box})
func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder {
    w, r := prepareTestRequest(hc, bktName, "", nil)
    ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
    r = r.WithContext(ctx)
    hc.Handler().CreateBucketHandler(w, r)
    return w
@@ -1,70 +0,0 @@
package handler

import (
    "net/http"
    "strconv"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

const maxBucketList = 10000

// ListBucketsHandler handles bucket listing requests.
func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)

    params, err := parseListBucketParams(r)
    if err != nil {
        h.logAndSendError(ctx, w, "failed to parse params", reqInfo, err)
        return
    }

    resp, err := h.obj.ListBuckets(ctx, params)
    if err != nil {
        h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
        return
    }

    if err = middleware.EncodeToResponse(w, encodeListBuckets(reqInfo.User, resp, params)); err != nil {
        h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
    }
}

func encodeListBuckets(owner string, resp layer.ListBucketsResult, params layer.ListBucketsParams) *ListBucketsResponse {
    res := &ListBucketsResponse{
        Owner: Owner{
            ID: owner,
            DisplayName: owner,
        },
        ContinuationToken: resp.ContinuationToken,
        Prefix: params.Prefix,
    }

    for _, item := range resp.Containers {
        res.Buckets.Buckets = append(res.Buckets.Buckets, Bucket{
            Name: item.Name,
            CreationDate: item.Created.UTC().Format(time.RFC3339),
            BucketRegion: item.LocationConstraint,
        })
    }
    return res
}

func parseListBucketParams(r *http.Request) (prm layer.ListBucketsParams, err error) {
    prm.MaxBuckets = maxBucketList
    strMaxBuckets := r.URL.Query().Get(middleware.QueryMaxBuckets)
    if strMaxBuckets != "" {
        if prm.MaxBuckets, err = strconv.Atoi(strMaxBuckets); err != nil || prm.MaxBuckets < 0 {
            return layer.ListBucketsParams{}, errors.GetAPIError(errors.ErrInvalidMaxKeys)
        }
    }
    prm.Prefix = r.URL.Query().Get(middleware.QueryPrefix)
    prm.BucketRegion = r.URL.Query().Get(middleware.QueryBucketRegion)
    prm.ContinuationToken = r.URL.Query().Get(middleware.QueryContinuationToken)

    return
}
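For reference, a small sketch of how a client could drive the listing parameters that parseListBucketParams above reads. The literal query keys (max-buckets, prefix, bucket-region, continuation-token) follow the AWS ListBuckets API and are an assumption here, since the diff only shows the middleware constants; the endpoint is a placeholder and real requests additionally need SigV4 signing.

```go
package main

import (
	"fmt"
	"net/url"
)

// buildListBucketsURL assembles the query string the handler parses.
// Assumed query keys mirror the AWS ListBuckets API; the endpoint is hypothetical.
func buildListBucketsURL(endpoint, prefix, region, token string, maxBuckets int) string {
	q := url.Values{}
	q.Set("max-buckets", fmt.Sprint(maxBuckets))
	if prefix != "" {
		q.Set("prefix", prefix)
	}
	if region != "" {
		q.Set("bucket-region", region)
	}
	if token != "" {
		q.Set("continuation-token", token)
	}
	return endpoint + "/?" + q.Encode()
}

func main() {
	// First page.
	fmt.Println(buildListBucketsURL("http://localhost:8080", "dev-", "us-west-1", "", 100))
	// Next page: feed back the ContinuationToken returned in the previous response.
	fmt.Println(buildListBucketsURL("http://localhost:8080", "dev-", "us-west-1", "next-bucket-name", 100))
}
```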
@@ -1,174 +0,0 @@
package handler

import (
    "encoding/xml"
    "net/http"
    "net/http/httptest"
    "net/url"
    "sort"
    "testing"

    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "github.com/stretchr/testify/require"
)

func TestHandler_ListBucketsHandler(t *testing.T) {
    const defaultConstraint = "default"

    region := "us-west-1"
    hc := prepareHandlerContext(t)
    hc.config.putLocationConstraint(region)

    props := []Bucket{
        {Name: "first"},
        {Name: "regional", BucketRegion: "us-west-1"},
        {Name: "third"},
    }
    sort.Slice(props, func(i, j int) bool {
        return props[i].Name < props[j].Name
    })
    for _, bkt := range props {
        createBucketWithConstraint(hc, bkt.Name, bkt.BucketRegion)
    }

    for _, tt := range []struct {
        title string
        token string
        prefix string
        bucketRegion string
        maxBuckets string
        expectErr bool
        expected []Bucket
        expectedToken string
    }{
        {
            title: "no params",
            expected: []Bucket{
                {Name: "first", BucketRegion: defaultConstraint},
                {Name: "regional", BucketRegion: "us-west-1"},
                {Name: "third", BucketRegion: defaultConstraint},
            },
        },
        {
            title: "negative max-buckets",
            maxBuckets: "-1",
            expected: []Bucket{},
            expectErr: true,
        },
        {
            title: "zero max-buckets",
            maxBuckets: "0",
            expected: []Bucket{},
        },
        {
            title: "prefix",
            prefix: "thi",
            expected: []Bucket{{Name: "third", BucketRegion: defaultConstraint}},
        },
        {
            title: "wrong prefix",
            prefix: "sdh",
            expected: []Bucket{},
        },
        {
            title: "bucket region",
            bucketRegion: region,
            expected: []Bucket{{Name: "regional", BucketRegion: "us-west-1"}},
        },
        {
            title: "default bucket region",
            bucketRegion: defaultConstraint,
            expected: []Bucket{
                {Name: "first", BucketRegion: defaultConstraint},
                {Name: "third", BucketRegion: defaultConstraint},
            },
        },
        {
            title: "wrong bucket region",
            bucketRegion: "sj dfdlsj",
            expected: []Bucket{},
        },
    } {
        t.Run(tt.title, func(t *testing.T) {
            if tt.expectErr {
                listBucketsErr(hc, tt.prefix, tt.token, tt.bucketRegion, tt.maxBuckets, apierr.GetAPIError(apierr.ErrInvalidMaxKeys))
                return
            }

            resp := listBuckets(hc, tt.prefix, tt.token, tt.bucketRegion, tt.maxBuckets)
            require.Len(t, resp.Buckets.Buckets, len(tt.expected))
            require.Equal(t, tt.prefix, resp.Prefix)
            require.Equal(t, hc.owner.String(), resp.Owner.ID)
            if len(resp.Buckets.Buckets) > 0 {
                t.Log(resp.Buckets.Buckets[0].Name)
            }
            for i, bkt := range resp.Buckets.Buckets {
                require.Equal(t, tt.expected[i].Name, bkt.Name)
                require.Equal(t, tt.expected[i].BucketRegion, bkt.BucketRegion)
            }
        })
    }

    t.Run("pagination", func(t *testing.T) {
        t.Run("happy path", func(t *testing.T) {
            resp := listBuckets(hc, "", "", "", "1")
            require.Len(t, resp.Buckets.Buckets, 1)
            require.Equal(t, props[0].Name, resp.Buckets.Buckets[0].Name)
            require.NotEmpty(t, resp.ContinuationToken)

            resp = listBuckets(hc, "", resp.ContinuationToken, "", "1")
            require.Len(t, resp.Buckets.Buckets, 1)
            require.Equal(t, props[1].Name, resp.Buckets.Buckets[0].Name)
            require.NotEmpty(t, resp.ContinuationToken)

            resp = listBuckets(hc, "", resp.ContinuationToken, "", "1")
            require.Len(t, resp.Buckets.Buckets, 1)
            require.Equal(t, props[2].Name, resp.Buckets.Buckets[0].Name)
            require.Empty(t, resp.ContinuationToken)
        })

        t.Run("wrong continuation-token", func(t *testing.T) {
            resp := listBuckets(hc, "", "CebuVwfRpdMqi9dvgV2SUNbrkfteGtudchKKhNabXUu9", "", "1")
            require.Len(t, resp.Buckets.Buckets, 0)
            require.Empty(t, resp.ContinuationToken)
        })
    })
}

func listBuckets(hc *handlerContext, prefix, token, bucketRegion, maxBuckets string) ListBucketsResponse {
    query := url.Values{
        middleware.QueryPrefix: []string{prefix},
        middleware.QueryContinuationToken: []string{token},
        middleware.QueryBucketRegion: []string{bucketRegion},
        middleware.QueryMaxBuckets: []string{maxBuckets},
    }
    w := listBucketsBase(hc, bucketPrm{query: query})
    assertStatus(hc.t, w, http.StatusOK)
    var resp ListBucketsResponse
    err := xml.NewDecoder(w.Body).Decode(&resp)
    require.NoError(hc.t, err)

    return resp
}

func listBucketsErr(hc *handlerContext, prefix, token, bucketRegion, maxBuckets string, err apierr.Error) {
    query := url.Values{
        middleware.QueryPrefix: []string{prefix},
        middleware.QueryContinuationToken: []string{token},
        middleware.QueryBucketRegion: []string{bucketRegion},
        middleware.QueryMaxBuckets: []string{maxBuckets},
    }
    w := listBucketsBase(hc, bucketPrm{query: query})
    assertS3Error(hc.t, w, err)
}

func listBucketsBase(hc *handlerContext, prm bucketPrm) *httptest.ResponseRecorder {
    box, _ := createAccessBox(hc.t)
    w, r := prepareTestFullRequest(hc, "", "", prm.query, nil)
    ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
    r = r.WithContext(ctx)
    hc.Handler().ListBucketsHandler(w, r)

    return w
}
@@ -29,7 +29,7 @@ func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
    cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
        return

@@ -112,7 +112,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
        return
    }

    cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
    cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
    if err != nil {
        h.reqLogger(ctx).Warn(logs.GetBucketCors, zap.Error(err))
        return

@@ -178,7 +178,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
        headers = strings.Split(requestHeaders, ", ")
    }

    cors, err := h.obj.GetBucketCORS(ctx, bktInfo, h.cfg.NewXMLDecoder)
    cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(ctx, w, "could not get cors", reqInfo, err)
        return
@@ -19,14 +19,7 @@ func TestCORSOriginWildcard(t *testing.T) {
    </CORSRule>
    </CORSConfiguration>
    `
    bodyNoXmlns := `
    <CORSConfiguration>
    <CORSRule>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedOrigin>*</AllowedOrigin>
    </CORSRule>
    </CORSConfiguration>`
    hc := prepareHandlerContextWithMinCache(t)
    hc := prepareHandlerContext(t)

    bktName := "bucket-for-cors"
    box, _ := createAccessBox(t)

@@ -46,17 +39,6 @@ func TestCORSOriginWildcard(t *testing.T) {
    w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
    hc.Handler().GetBucketCorsHandler(w, r)
    assertStatus(t, w, http.StatusOK)

    hc.config.useDefaultXMLNS = true
    w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(bodyNoXmlns))
    ctx = middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
    r = r.WithContext(ctx)
    hc.Handler().PutBucketCorsHandler(w, r)
    assertStatus(t, w, http.StatusOK)

    w, r = prepareTestPayloadRequest(hc, bktName, "", nil)
    hc.Handler().GetBucketCorsHandler(w, r)
    assertStatus(t, w, http.StatusOK)
}

func TestPreflight(t *testing.T) {
@ -65,16 +65,10 @@ func TestMD5HeaderBadOrEmpty(t *testing.T) {
|
|||
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)
|
||||
|
||||
headers = map[string]string{
|
||||
api.ContentMD5: "yZRvHQZYwL5V7+k2pcwHLg==",
|
||||
api.ContentMD5: "YWJjMTIzIT8kKiYoKSctPUB+",
|
||||
}
|
||||
|
||||
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrBadDigest)
|
||||
|
||||
headers = map[string]string{
|
||||
api.ContentMD5: "dGhlIHF1aWNrIGJyb3dF",
|
||||
}
|
||||
|
||||
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)
|
||||
}
|
||||
|
||||
func TestGetEncryptedRange(t *testing.T) {
|
||||
|
|
|
@@ -74,22 +74,19 @@ func (hc *handlerContextBase) Context() context.Context {

type configMock struct {
    defaultPolicy netmap.PlacementPolicy
    placementPolicies map[string]netmap.PlacementPolicy
    copiesNumbers map[string][]uint32
    defaultCopiesNumbers []uint32
    bypassContentEncodingInChunks bool
    md5Enabled bool
    tlsTerminationHeader string
    useDefaultXMLNS bool
}

func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
    return c.defaultPolicy
}

func (c *configMock) PlacementPolicy(_, constraint string) (netmap.PlacementPolicy, bool) {
    policy, ok := c.placementPolicies[constraint]
    return policy, ok
func (c *configMock) PlacementPolicy(_, _ string) (netmap.PlacementPolicy, bool) {
    return netmap.PlacementPolicy{}, false
}

func (c *configMock) CopiesNumbers(_, locationConstraint string) ([]uint32, bool) {

@@ -102,11 +99,7 @@ func (c *configMock) DefaultCopiesNumbers(_ string) []uint32 {
}

func (c *configMock) NewXMLDecoder(r io.Reader, _ string) *xml.Decoder {
    dec := xml.NewDecoder(r)
    if c.useDefaultXMLNS {
        dec.DefaultSpace = "http://s3.amazonaws.com/doc/2006-03-01/"
    }
    return dec
    return xml.NewDecoder(r)
}

func (c *configMock) BypassContentEncodingInChunks(_ string) bool {

@@ -153,10 +146,6 @@ func (c *configMock) TLSTerminationHeader() string {
    return c.tlsTerminationHeader
}

func (c *configMock) putLocationConstraint(constraint string) {
    c.placementPolicies[constraint] = c.defaultPolicy
}

func prepareHandlerContext(t *testing.T) *handlerContext {
    hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
    require.NoError(t, err)

@@ -223,8 +212,7 @@ func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBas
    }

    cfg := &configMock{
        defaultPolicy: pp,
        placementPolicies: make(map[string]netmap.PlacementPolicy),
        defaultPolicy: pp,
    }
    h := &handler{
        log: log,
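The useDefaultXMLNS branch in configMock.NewXMLDecoder above is what lets a CORS document without an xmlns attribute unmarshal into namespaced types. A minimal sketch of that mechanism, using an illustrative struct rather than the gateway's data.CORSConfiguration:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// corsConfiguration is an illustrative stand-in; the gateway's own type
// may bind the S3 namespace differently.
type corsConfiguration struct {
	XMLName   xml.Name   `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CORSConfiguration"`
	CORSRules []corsRule `xml:"CORSRule"`
}

type corsRule struct {
	AllowedMethods []string `xml:"AllowedMethod"`
	AllowedOrigins []string `xml:"AllowedOrigin"`
}

func main() {
	bodyNoXmlns := `<CORSConfiguration><CORSRule><AllowedMethod>GET</AllowedMethod><AllowedOrigin>*</AllowedOrigin></CORSRule></CORSConfiguration>`

	// A plain decoder sees an element in no namespace, which does not match
	// the namespaced XMLName above, so decoding fails.
	var miss corsConfiguration
	errPlain := xml.NewDecoder(strings.NewReader(bodyNoXmlns)).Decode(&miss)
	fmt.Println("plain decoder failed:", errPlain != nil)

	// With DefaultSpace set (as in configMock.NewXMLDecoder when useDefaultXMLNS
	// is true), unadorned elements are treated as belonging to the S3 namespace.
	dec := xml.NewDecoder(strings.NewReader(bodyNoXmlns))
	dec.DefaultSpace = "http://s3.amazonaws.com/doc/2006-03-01/"
	var hit corsConfiguration
	if err := dec.Decode(&hit); err != nil {
		panic(err)
	}
	fmt.Println("rules decoded with DefaultSpace:", len(hit.CORSRules))
}
```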
api/handler/list.go (49 lines, new file)

@@ -0,0 +1,49 @@
package handler

import (
    "net/http"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

const maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.

// ListBucketsHandler handles bucket listing requests.
func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
    var (
        own user.ID
        res *ListBucketsResponse
        ctx = r.Context()
        reqInfo = middleware.GetReqInfo(ctx)
    )

    list, err := h.obj.ListBuckets(ctx)
    if err != nil {
        h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
        return
    }

    if len(list) > 0 {
        own = list[0].Owner
    }

    res = &ListBucketsResponse{
        Owner: Owner{
            ID: own.String(),
            DisplayName: own.String(),
        },
    }

    for _, item := range list {
        res.Buckets.Buckets = append(res.Buckets.Buckets, Bucket{
            Name: item.Name,
            CreationDate: item.Created.UTC().Format(time.RFC3339),
        })
    }

    if err = middleware.EncodeToResponse(w, res); err != nil {
        h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
    }
}
@@ -15,8 +15,6 @@ import (
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

const maxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.

// ListObjectsV1Handler handles objects listing requests for API version 1.
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
@@ -744,7 +744,7 @@ func parseCannedACL(header http.Header) (string, error) {
        return acl, nil
    }

    return "", apierr.GetAPIErrorWithError(apierr.ErrMalformedACL, fmt.Errorf("unknown acl: %s", acl))
    return "", fmt.Errorf("unknown acl: %s", acl)
}

func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
@@ -4,7 +4,6 @@ import (
    "bytes"
    "context"
    "crypto/md5"
    "crypto/rand"
    "crypto/tls"
    "encoding/base64"
    "encoding/hex"

@@ -283,20 +282,12 @@ func TestPutObjectWithInvalidContentMD5(t *testing.T) {
    createTestBucket(tc, bktName)

    content := []byte("content")
    md5HeaderContent := make([]byte, md5.Size)
    n, err := rand.Read(md5HeaderContent)
    require.Equal(t, md5.Size, n)
    require.NoError(t, err)
    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(md5HeaderContent))
    r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid")))
    tc.Handler().PutObjectHandler(w, r)
    assertS3Error(t, w, apierr.GetAPIError(apierr.ErrBadDigest))

    w, r = prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid")))
    tc.Handler().PutObjectHandler(w, r)
    assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))

    content = []byte("content")
    w, r = prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
    r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("")))
    tc.Handler().PutObjectHandler(w, r)
@@ -15,9 +15,6 @@ type ListBucketsResponse struct {
    Buckets struct {
        Buckets []Bucket `xml:"Bucket"`
    } // Buckets are nested

    ContinuationToken string `xml:"ContinuationToken,omitempty"`
    Prefix string `xml:"Prefix,omitempty"`
}

// ListObjectsV1Response -- format for ListObjectsV1 response.

@@ -54,9 +51,8 @@ type ListObjectsV2Response struct {

// Bucket container for bucket metadata.
type Bucket struct {
    Name string `xml:"Name"`
    CreationDate string `xml:"CreationDate"` // time string of format "2006-01-02T15:04:05.000Z"
    BucketRegion string `xml:"BucketRegion,omitempty"`
    Name string
    CreationDate string // time string of format "2006-01-02T15:04:05.000Z"
}

// PolicyStatus contains status of bucket policy.
@@ -3,9 +3,7 @@ package layer
import (
    "context"
    "fmt"
    "sort"
    "strconv"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"

@@ -78,7 +76,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*d
    return info, nil
}

func (n *Layer) containerList(ctx context.Context, listParams ListBucketsParams) ([]*data.BucketInfo, error) {
func (n *Layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
    stoken := n.SessionTokenForRead(ctx)

    prm := frostfs.PrmUserContainers{

@@ -104,34 +102,10 @@ func (n *Layer) containerList(ctx context.Context, listParams ListBucketsParams)
            continue
        }

        if shouldSkipBucket(info, listParams) {
            continue
        }

        list = append(list, info)
    }

    sort.Slice(list, func(i, j int) bool {
        return list[i].Name < list[j].Name
    })

    for i, info := range list {
        if listParams.ContinuationToken != "" && info.Name != listParams.ContinuationToken {
            continue
        }
        return list[i:], nil
    }

    return nil, nil
}

func shouldSkipBucket(info *data.BucketInfo, prm ListBucketsParams) bool {
    if !strings.HasPrefix(info.Name, prm.Prefix) ||
        (prm.BucketRegion != "" && info.LocationConstraint != prm.BucketRegion) {
        return true
    }

    return false
    return list, nil
}

func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
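A stand-alone sketch of the listing flow that containerList and ListBuckets implement on one side of this compare: filter by prefix and region, sort by name, resume at the continuation token, then cut one page and expose the next bucket name as the new token. All names below are local to the sketch, not the gateway's types.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type bucketInfo struct {
	Name               string
	LocationConstraint string
}

type listParams struct {
	MaxBuckets        int
	Prefix            string
	ContinuationToken string
	BucketRegion      string
}

// paginate mirrors the flow shown above: filter, sort, skip to the token,
// cut one page, and use the first name of the next page as the new token.
func paginate(all []bucketInfo, p listParams) (page []bucketInfo, nextToken string) {
	var filtered []bucketInfo
	for _, b := range all {
		if !strings.HasPrefix(b.Name, p.Prefix) ||
			(p.BucketRegion != "" && b.LocationConstraint != p.BucketRegion) {
			continue
		}
		filtered = append(filtered, b)
	}
	sort.Slice(filtered, func(i, j int) bool { return filtered[i].Name < filtered[j].Name })

	start := 0
	if p.ContinuationToken != "" {
		for i, b := range filtered {
			if b.Name == p.ContinuationToken {
				start = i
				break
			}
		}
	}
	filtered = filtered[start:]

	if len(filtered) > p.MaxBuckets {
		return filtered[:p.MaxBuckets], filtered[p.MaxBuckets].Name
	}
	return filtered, ""
}

func main() {
	buckets := []bucketInfo{{"first", "default"}, {"regional", "us-west-1"}, {"third", "default"}}
	page, token := paginate(buckets, listParams{MaxBuckets: 1})
	fmt.Println(page, token) // [{first default}] regional
}
```

The continuation token is simply the name of the first bucket of the next page, which is why the tests above feed resp.ContinuationToken straight back into the next request.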
@@ -3,7 +3,6 @@ package layer
import (
    "bytes"
    "context"
    "encoding/xml"
    "errors"
    "fmt"
    "io"

@@ -96,8 +95,8 @@ func (n *Layer) deleteCORSObject(ctx context.Context, bktInfo *data.BucketInfo,
    }
}

func (n *Layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, decoder func(io.Reader, string) *xml.Decoder) (*data.CORSConfiguration, error) {
    cors, err := n.getCORS(ctx, bktInfo, decoder)
func (n *Layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*data.CORSConfiguration, error) {
    cors, err := n.getCORS(ctx, bktInfo)
    if err != nil {
        return nil, err
    }
@@ -240,9 +240,6 @@ var (

    // ErrGlobalDomainIsAlreadyTaken is returned from FrostFS in case of global domain is already taken.
    ErrGlobalDomainIsAlreadyTaken = errors.New("global domain is already taken")

    // ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
    ErrQuotaLimitReached = errors.New("quota limit reached")
)

// FrostFS represents virtual connection to FrostFS network.
@@ -195,18 +195,6 @@ type (
        Encode string
    }

    ListBucketsParams struct {
        MaxBuckets int
        Prefix string
        ContinuationToken string
        BucketRegion string
    }

    ListBucketsResult struct {
        Containers []*data.BucketInfo
        ContinuationToken string
    }

    // VersionedObject stores info about objects to delete.
    VersionedObject struct {
        Name string

@@ -383,24 +371,8 @@ func (n *Layer) ResolveCID(ctx context.Context, name string) (cid.ID, error) {

// ListBuckets returns all user containers. The name of the bucket is a container
// id. Timestamp is omitted since it is not saved in frostfs container.
func (n *Layer) ListBuckets(ctx context.Context, params ListBucketsParams) (ListBucketsResult, error) {
    var result ListBucketsResult
    var err error

    if params.MaxBuckets == 0 {
        return result, nil
    }

    result.Containers, err = n.containerList(ctx, params)
    if err != nil {
        return ListBucketsResult{}, err
    }
    if len(result.Containers) > params.MaxBuckets {
        result.ContinuationToken = result.Containers[params.MaxBuckets].Name
        result.Containers = result.Containers[:params.MaxBuckets]
    }

    return result, nil
func (n *Layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
    return n.containerList(ctx)
}

// GetObject from storage.
@@ -289,7 +289,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
        return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
    }
    headerMd5Hash, err := base64.StdEncoding.DecodeString(*p.ContentMD5)
    if err != nil || len(headerMd5Hash) != md5.Size {
    if err != nil {
        return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
    }
    if !bytes.Equal(headerMd5Hash, createdObj.MD5Sum) {
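The tightened check above distinguishes a malformed Content-MD5 header from a well-formed digest that simply does not match the payload. A minimal sketch of that ordering, with local stand-ins for the API errors:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"errors"
	"fmt"
)

var (
	errInvalidDigest = errors.New("InvalidDigest") // header is not a valid 16-byte MD5 digest
	errBadDigest     = errors.New("BadDigest")     // digest is valid but does not match the payload
)

// checkContentMD5 mirrors the validation order in the PutObject hunk above.
func checkContentMD5(header string, payload []byte) error {
	want, err := base64.StdEncoding.DecodeString(header)
	if err != nil || len(want) != md5.Size {
		return errInvalidDigest
	}
	got := md5.Sum(payload)
	if !bytes.Equal(want, got[:]) {
		return errBadDigest
	}
	return nil
}

func main() {
	payload := []byte("content")
	sum := md5.Sum(payload)

	fmt.Println(checkContentMD5(base64.StdEncoding.EncodeToString(sum[:]), payload)) // <nil>
	fmt.Println(checkContentMD5("dGhlIHF1aWNrIGJyb3dF", payload))                    // InvalidDigest: decodes to 15 bytes
	wrong := md5.Sum([]byte("other"))
	fmt.Println(checkContentMD5(base64.StdEncoding.EncodeToString(wrong[:]), payload)) // BadDigest
}
```

This is also why the encryption test above expects ErrInvalidDigest for the short base64 value and ErrBadDigest for a correctly sized but mismatching one.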
@@ -5,7 +5,6 @@ import (
    "encoding/xml"
    "errors"
    "fmt"
    "io"
    "math"
    "strconv"
    "time"

@@ -14,7 +13,6 @@ import (
    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -162,7 +160,7 @@ func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion)
    return lockInfo, nil
}

func (n *Layer) getCORS(ctx context.Context, bkt *data.BucketInfo, decoder func(io.Reader, string) *xml.Decoder) (*data.CORSConfiguration, error) {
func (n *Layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSConfiguration, error) {
    owner := n.BearerOwner(ctx)
    if cors := n.cache.GetCORS(owner, bkt); cors != nil {
        return cors, nil

@@ -191,7 +189,7 @@ func (n *Layer) getCORS(ctx context.Context, bkt *data.BucketInfo, decoder func(
    }

    cors := &data.CORSConfiguration{}
    if err = decoder(obj.Payload, "").Decode(&cors); err != nil {
    if err = xml.NewDecoder(obj.Payload).Decode(&cors); err != nil {
        return nil, fmt.Errorf("unmarshal cors: %w", err)
    }

@@ -217,7 +215,6 @@ func (n *Layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo)
        return nil, err
    }
    settings = &data.BucketSettings{Versioning: data.VersioningUnversioned}
    n.reqLogger(ctx).Debug(logs.BucketSettingsNotFoundUseDefaults)
    }

    n.cache.PutSettings(owner, bktInfo, settings)
cmd/s3-gw/app.go (116 lines changed)

@@ -65,7 +65,7 @@ type (
    App struct {
        ctr s3middleware.Center
        log *zap.Logger
        cfg *appCfg
        cfg *viper.Viper
        pool *pool.Pool
        treePool *treepool.Pool
        key *keys.PrivateKey

@@ -161,15 +161,15 @@ func (s *loggerSettings) setMetrics(appMetrics *metrics.AppMetrics) {
    s.appMetrics = appMetrics
}

func newApp(ctx context.Context, cfg *appCfg) *App {
func newApp(ctx context.Context, v *viper.Viper) *App {
    logSettings := &loggerSettings{}
    log := pickLogger(cfg.config(), logSettings)
    settings := newAppSettings(log, cfg.config())
    appCache := layer.NewCache(getCacheOptions(cfg.config(), log.logger))
    log := pickLogger(v, logSettings)
    settings := newAppSettings(log, v)
    appCache := layer.NewCache(getCacheOptions(v, log.logger))

    app := &App{
        log: log.logger,
        cfg: cfg,
        cfg: v,
        cache: appCache,

        webDone: make(chan struct{}, 1),

@@ -184,10 +184,6 @@ func newApp(ctx context.Context, cfg *appCfg) *App {
    return app
}

func (a *App) config() *viper.Viper {
    return a.cfg.config()
}

func (a *App) init(ctx context.Context) {
    a.initPools(ctx)
    a.initResolver()

@@ -202,7 +198,7 @@ func (a *App) init(ctx context.Context) {
}

func (a *App) initAuthCenter(ctx context.Context) {
    if a.config().IsSet(cfgContainersAccessBox) {
    if a.cfg.IsSet(cfgContainersAccessBox) {
        cnrID, err := a.resolveContainerID(ctx, cfgContainersAccessBox)
        if err != nil {
            a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err))

@@ -213,11 +209,11 @@ func (a *App) initAuthCenter(ctx context.Context) {
    cfg := tokens.Config{
        FrostFS: frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(a.pool, a.key), a.log),
        Key: a.key,
        CacheConfig: getAccessBoxCacheConfig(a.config(), a.log),
        RemovingCheckAfterDurations: fetchRemovingCheckInterval(a.config(), a.log),
        CacheConfig: getAccessBoxCacheConfig(a.cfg, a.log),
        RemovingCheckAfterDurations: fetchRemovingCheckInterval(a.cfg, a.log),
    }

    a.ctr = auth.New(tokens.New(cfg), a.config().GetStringSlice(cfgAllowedAccessKeyIDPrefixes), a.settings)
    a.ctr = auth.New(tokens.New(cfg), a.cfg.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), a.settings)
}

func (a *App) initLayer(ctx context.Context) {

@@ -231,7 +227,7 @@ func (a *App) initLayer(ctx context.Context) {
    user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)

    var corsCnrInfo *data.BucketInfo
    if a.config().IsSet(cfgContainersCORS) {
    if a.cfg.IsSet(cfgContainersCORS) {
        corsCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersCORS)
        if err != nil {
            a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err))

@@ -239,7 +235,7 @@ func (a *App) initLayer(ctx context.Context) {
    }

    var lifecycleCnrInfo *data.BucketInfo
    if a.config().IsSet(cfgContainersLifecycle) {
    if a.cfg.IsSet(cfgContainersLifecycle) {
        lifecycleCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersLifecycle)
        if err != nil {
            a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err))

@@ -603,7 +599,7 @@ func (a *App) initMetrics() {
        Logger: a.log,
        PoolStatistics: frostfs.NewPoolStatistic(a.pool),
        TreeStatistic: a.treePool,
        Enabled: a.config().GetBool(cfgPrometheusEnabled),
        Enabled: a.cfg.GetBool(cfgPrometheusEnabled),
    }

    a.metrics = metrics.NewAppMetrics(cfg)

@@ -613,9 +609,9 @@ func (a *App) initMetrics() {

func (a *App) initFrostfsID(ctx context.Context) {
    cli, err := ffidcontract.New(ctx, ffidcontract.Config{
        RPCAddress: a.config().GetString(cfgRPCEndpoint),
        Contract: a.config().GetString(cfgFrostfsIDContract),
        ProxyContract: a.config().GetString(cfgProxyContract),
        RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
        Contract: a.cfg.GetString(cfgFrostfsIDContract),
        ProxyContract: a.cfg.GetString(cfgProxyContract),
        Key: a.key,
        Waiter: commonclient.WaiterOptions{
            IgnoreAlreadyExistsError: false,

@@ -627,7 +623,7 @@ func (a *App) initFrostfsID(ctx context.Context) {
    }

    a.frostfsid, err = frostfsid.NewFrostFSID(frostfsid.Config{
        Cache: cache.NewFrostfsIDCache(getFrostfsIDCacheConfig(a.config(), a.log)),
        Cache: cache.NewFrostfsIDCache(getFrostfsIDCacheConfig(a.cfg, a.log)),
        FrostFSID: cli,
        Logger: a.log,
    })

@@ -638,9 +634,9 @@ func (a *App) initFrostfsID(ctx context.Context) {

func (a *App) initPolicyStorage(ctx context.Context) {
    policyContract, err := contract.New(ctx, contract.Config{
        RPCAddress: a.config().GetString(cfgRPCEndpoint),
        Contract: a.config().GetString(cfgPolicyContract),
        ProxyContract: a.config().GetString(cfgProxyContract),
        RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
        Contract: a.cfg.GetString(cfgPolicyContract),
        ProxyContract: a.cfg.GetString(cfgProxyContract),
        Key: a.key,
        Waiter: commonclient.WaiterOptions{
            IgnoreAlreadyExistsError: false,

@@ -653,7 +649,7 @@ func (a *App) initPolicyStorage(ctx context.Context) {

    a.policyStorage = policy.NewStorage(policy.StorageConfig{
        Contract: policyContract,
        Cache: cache.NewMorphPolicyCache(getMorphPolicyCacheConfig(a.config(), a.log)),
        Cache: cache.NewMorphPolicyCache(getMorphPolicyCacheConfig(a.cfg, a.log)),
        Log: a.log,
    })
}

@@ -669,13 +665,13 @@ func (a *App) initResolver() {
func (a *App) getResolverConfig() *resolver.Config {
    return &resolver.Config{
        FrostFS: frostfs.NewResolverFrostFS(a.pool),
        RPCAddress: a.config().GetString(cfgRPCEndpoint),
        RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
    }
}

func (a *App) getResolverOrder() []string {
    order := a.config().GetStringSlice(cfgResolveOrder)
    if a.config().GetString(cfgRPCEndpoint) == "" {
    order := a.cfg.GetStringSlice(cfgResolveOrder)
    if a.cfg.GetString(cfgRPCEndpoint) == "" {
        order = remove(order, resolver.NNSResolver)
        a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
    }

@@ -693,15 +689,15 @@ func (a *App) initTracing(ctx context.Context) {
        instanceID = a.servers[0].Address()
    }
    cfg := tracing.Config{
        Enabled: a.config().GetBool(cfgTracingEnabled),
        Exporter: tracing.Exporter(a.config().GetString(cfgTracingExporter)),
        Endpoint: a.config().GetString(cfgTracingEndpoint),
        Enabled: a.cfg.GetBool(cfgTracingEnabled),
        Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
        Endpoint: a.cfg.GetString(cfgTracingEndpoint),
        Service: "frostfs-s3-gw",
        InstanceID: instanceID,
        Version: version.Version,
    }

    if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
    if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
        caBytes, err := os.ReadFile(trustedCa)
        if err != nil {
            a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))

@@ -716,7 +712,7 @@ func (a *App) initTracing(ctx context.Context) {
        cfg.ServerCaCertPool = certPool
    }

    attributes, err := fetchTracingAttributes(a.config())
    attributes, err := fetchTracingAttributes(a.cfg)
    if err != nil {
        a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
        return

@@ -764,8 +760,8 @@ func (a *App) initPools(ctx context.Context) {
    var prm pool.InitParameters
    var prmTree treepool.InitParameters

    password := wallet.GetPassword(a.config(), cfgWalletPassphrase)
    key, err := wallet.GetKeyFromPath(a.config().GetString(cfgWalletPath), a.config().GetString(cfgWalletAddress), password)
    password := wallet.GetPassword(a.cfg, cfgWalletPassphrase)
    key, err := wallet.GetKeyFromPath(a.cfg.GetString(cfgWalletPath), a.cfg.GetString(cfgWalletAddress), password)
    if err != nil {
        a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
    }

@@ -774,36 +770,36 @@ func (a *App) initPools(ctx context.Context) {
    prmTree.SetKey(key)
    a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))

    for _, peer := range fetchPeers(a.log, a.config()) {
    for _, peer := range fetchPeers(a.log, a.cfg) {
        prm.AddNode(peer)
        prmTree.AddNode(peer)
    }

    connTimeout := fetchConnectTimeout(a.config())
    connTimeout := fetchConnectTimeout(a.cfg)
    prm.SetNodeDialTimeout(connTimeout)
    prmTree.SetNodeDialTimeout(connTimeout)

    streamTimeout := fetchStreamTimeout(a.config())
    streamTimeout := fetchStreamTimeout(a.cfg)
    prm.SetNodeStreamTimeout(streamTimeout)
    prmTree.SetNodeStreamTimeout(streamTimeout)

    healthCheckTimeout := fetchHealthCheckTimeout(a.config())
    healthCheckTimeout := fetchHealthCheckTimeout(a.cfg)
    prm.SetHealthcheckTimeout(healthCheckTimeout)
    prmTree.SetHealthcheckTimeout(healthCheckTimeout)

    rebalanceInterval := fetchRebalanceInterval(a.config())
    rebalanceInterval := fetchRebalanceInterval(a.cfg)
    prm.SetClientRebalanceInterval(rebalanceInterval)
    prmTree.SetClientRebalanceInterval(rebalanceInterval)

    errorThreshold := fetchErrorThreshold(a.config())
    errorThreshold := fetchErrorThreshold(a.cfg)
    prm.SetErrorThreshold(errorThreshold)

    prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(a.config()))
    prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(a.cfg))

    prm.SetLogger(a.log)
    prmTree.SetLogger(a.log)

    prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))
    prmTree.SetMaxRequestAttempts(a.cfg.GetInt(cfgTreePoolMaxAttempts))

    interceptors := []grpc.DialOption{
        grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),

@@ -822,7 +818,7 @@ func (a *App) initPools(ctx context.Context) {
        a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
    }

    if a.config().GetBool(cfgTreePoolNetmapSupport) {
    if a.cfg.GetBool(cfgTreePoolNetmapSupport) {
        prmTree.SetNetMapInfoSource(frostfs.NewSource(frostfs.NewFrostFS(p, key), a.cache))
    }

@@ -899,10 +895,10 @@ func (a *App) Serve(ctx context.Context) {
    srv := new(http.Server)
    srv.Handler = chiRouter
    srv.ErrorLog = zap.NewStdLog(a.log)
    srv.ReadTimeout = a.config().GetDuration(cfgWebReadTimeout)
    srv.ReadHeaderTimeout = a.config().GetDuration(cfgWebReadHeaderTimeout)
    srv.WriteTimeout = a.config().GetDuration(cfgWebWriteTimeout)
    srv.IdleTimeout = a.config().GetDuration(cfgWebIdleTimeout)
    srv.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
    srv.ReadHeaderTimeout = a.cfg.GetDuration(cfgWebReadHeaderTimeout)
    srv.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
    srv.IdleTimeout = a.cfg.GetDuration(cfgWebIdleTimeout)

    a.startServices()

@@ -955,11 +951,11 @@ func shutdownContext() (context.Context, context.CancelFunc) {
func (a *App) configReload(ctx context.Context) {
    a.log.Info(logs.SIGHUPConfigReloadStarted)

    if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
    if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
        a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
        return
    }
    if err := a.cfg.reload(); err != nil {
    if err := readInConfig(a.cfg); err != nil {
        a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
        return
    }

@@ -979,7 +975,7 @@ func (a *App) configReload(ctx context.Context) {

    a.updateSettings()

    a.metrics.SetEnabled(a.config().GetBool(cfgPrometheusEnabled))
    a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
    a.initTracing(ctx)
    a.setHealthStatus()

@@ -987,33 +983,33 @@ func (a *App) configReload(ctx context.Context) {
}

func (a *App) updateSettings() {
    if lvl, err := getLogLevel(a.config()); err != nil {
    if lvl, err := getLogLevel(a.cfg); err != nil {
        a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
    } else {
        a.settings.logLevel.SetLevel(lvl)
    }

    if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
    if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.cfg, a.log)); err != nil {
        a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
    }

    a.settings.update(a.config(), a.log)
    a.settings.update(a.cfg, a.log)
}

func (a *App) startServices() {
    a.services = a.services[:0]

    pprofService := NewPprofService(a.config(), a.log)
    pprofService := NewPprofService(a.cfg, a.log)
    a.services = append(a.services, pprofService)
    go pprofService.Start()

    prometheusService := NewPrometheusService(a.config(), a.log, a.metrics.Handler())
    prometheusService := NewPrometheusService(a.cfg, a.log, a.metrics.Handler())
    a.services = append(a.services, prometheusService)
    go prometheusService.Start()
}

func (a *App) initServers(ctx context.Context) {
    serversInfo := fetchServers(a.config(), a.log)
    serversInfo := fetchServers(a.cfg, a.log)

    a.servers = make([]Server, 0, len(serversInfo))
    for _, serverInfo := range serversInfo {

@@ -1040,7 +1036,7 @@ func (a *App) initServers(ctx context.Context) {
}

func (a *App) updateServers() error {
    serversInfo := fetchServers(a.config(), a.log)
    serversInfo := fetchServers(a.cfg, a.log)

    a.mu.Lock()
    defer a.mu.Unlock()

@@ -1175,7 +1171,7 @@ func (a *App) setRuntimeParameters() {
        return
    }

    softMemoryLimit := fetchSoftMemoryLimit(a.config())
    softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
    previous := debug.SetMemoryLimit(softMemoryLimit)
    if softMemoryLimit != previous {
        a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,

@@ -1251,7 +1247,7 @@ func (a *App) fetchContainerInfo(ctx context.Context, cfgKey string) (info *data
}

func (a *App) resolveContainerID(ctx context.Context, cfgKey string) (cid.ID, error) {
    containerString := a.config().GetString(cfgKey)
    containerString := a.cfg.GetString(cfgKey)

    var id cid.ID
    if err := id.DecodeString(containerString); err != nil {
@@ -10,7 +10,6 @@ import (
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"

@@ -302,49 +301,6 @@ var ignore = map[string]struct{}{
    cmdVersion: {},
}

type appCfg struct {
    flags *pflag.FlagSet

    mu sync.RWMutex
    settings *viper.Viper
}

func (a *appCfg) reload() error {
    old := a.config()

    v, err := newViper(a.flags)
    if err != nil {
        return err
    }

    if old.IsSet(cmdConfig) {
        v.Set(cmdConfig, old.Get(cmdConfig))
    }
    if old.IsSet(cmdConfigDir) {
        v.Set(cmdConfigDir, old.Get(cmdConfigDir))
    }

    if err = readInConfig(v); err != nil {
        return err
    }

    a.setConfig(v)
    return nil
}

func (a *appCfg) config() *viper.Viper {
    a.mu.RLock()
    defer a.mu.RUnlock()

    return a.settings
}

func (a *appCfg) setConfig(v *viper.Viper) {
    a.mu.Lock()
    a.settings = v
    a.mu.Unlock()
}

func fetchConnectTimeout(cfg *viper.Viper) time.Duration {
    connTimeout := cfg.GetDuration(cfgConnectTimeout)
    if connTimeout <= 0 {

@@ -922,7 +878,7 @@ func fetchTombstoneWorkerPoolSize(v *viper.Viper) int {
    return tombstoneWorkerPoolSize
}

func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
func newSettings() *viper.Viper {
    v := viper.New()

    v.AutomaticEnv()

@@ -931,16 +887,6 @@ func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
    v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
    v.AllowEmptyEnv(true)

    setDefaults(v, flags)

    if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
        v.Set(cfgServer+".0."+cfgTLSEnabled, true)
    }

    return v, bindFlags(v, flags)
}

func newSettings() *appCfg {
    // flags setup:
    flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
    flags.SetOutput(os.Stdout)

@@ -968,71 +914,15 @@ func newSettings() *appCfg {
    flags.String(cfgTLSCertFile, "", "TLS certificate file to use")
    flags.String(cfgTLSKeyFile, "", "TLS key file to use")

    flags.StringArrayP(cfgPeers, "p", nil, "set FrostFS nodes")
    peers := flags.StringArrayP(cfgPeers, "p", nil, "set FrostFS nodes")

    flags.StringP(cfgRPCEndpoint, "r", "", "set RPC endpoint")
    flags.StringSlice(cfgResolveOrder, []string{resolver.DNSResolver}, "set bucket name resolve order")
    resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.DNSResolver}, "set bucket name resolve order")

    flags.StringSliceP(cfgListenDomains, "d", nil, "set domains to be listened")
    domains := flags.StringSliceP(cfgListenDomains, "d", nil, "set domains to be listened")

    if err := flags.Parse(os.Args); err != nil {
        panic(err)
    }
    // set defaults:

    v, err := newViper(flags)
    if err != nil {
        panic(fmt.Errorf("bind flags: %w", err))
    }

    switch {
    case help != nil && *help:
        fmt.Printf("FrostFS S3 gateway %s\n", version.Version)
        flags.PrintDefaults()

        fmt.Println()
        fmt.Println("Default environments:")
        fmt.Println()
        keys := v.AllKeys()
        sort.Strings(keys)

        for i := range keys {
            if _, ok := ignore[keys[i]]; ok {
                continue
            }

            defaultValue := v.GetString(keys[i])
            if len(defaultValue) == 0 {
                continue
            }

            k := strings.Replace(keys[i], ".", "_", -1)
            fmt.Printf("%s_%s = %s\n", envPrefix, strings.ToUpper(k), defaultValue)
        }

        fmt.Println()
        fmt.Println("Peers preset:")
        fmt.Println()

        fmt.Printf("%s_%s_[N]_ADDRESS = string\n", envPrefix, strings.ToUpper(cfgPeers))
        fmt.Printf("%s_%s_[N]_WEIGHT = 0..1 (float)\n", envPrefix, strings.ToUpper(cfgPeers))

        os.Exit(0)
    case versionFlag != nil && *versionFlag:
        fmt.Printf("FrostFS S3 Gateway\nVersion: %s\nGoVersion: %s\n", version.Version, runtime.Version())
        os.Exit(0)
    }

    if err = readInConfig(v); err != nil {
        panic(err)
    }

    return &appCfg{
        flags: flags,
        settings: v,
    }
}

func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
    v.SetDefault(cfgAccessBoxCacheRemovingCheckInterval, defaultAccessBoxCacheRemovingCheckInterval)

    // logger:

@@ -1096,21 +986,78 @@ func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
    // multinet
    v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)

    if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
        v.SetDefault(cfgResolveOrder, resolveMethods)
    // Bind flags
    if err := bindFlags(v, flags); err != nil {
        panic(fmt.Errorf("bind flags: %w", err))
    }

    if peers, err := flags.GetStringArray(cfgPeers); err == nil {
        for i := range peers {
            v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i])
    if err := flags.Parse(os.Args); err != nil {
        panic(err)
    }

    if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
        v.Set(cfgServer+".0."+cfgTLSEnabled, true)
    }

    if resolveMethods != nil {
        v.SetDefault(cfgResolveOrder, *resolveMethods)
    }

    if peers != nil && len(*peers) > 0 {
        for i := range *peers {
            v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
            v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
            v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
        }
    }

    if domains, err := flags.GetStringSlice(cfgListenDomains); err == nil && len(domains) > 0 {
        v.SetDefault(cfgListenDomains, domains)
    if domains != nil && len(*domains) > 0 {
        v.SetDefault(cfgListenDomains, *domains)
    }

    switch {
    case help != nil && *help:
        fmt.Printf("FrostFS S3 gateway %s\n", version.Version)
        flags.PrintDefaults()

        fmt.Println()
        fmt.Println("Default environments:")
        fmt.Println()
        keys := v.AllKeys()
        sort.Strings(keys)

        for i := range keys {
            if _, ok := ignore[keys[i]]; ok {
                continue
            }

            defaultValue := v.GetString(keys[i])
            if len(defaultValue) == 0 {
                continue
            }

            k := strings.Replace(keys[i], ".", "_", -1)
            fmt.Printf("%s_%s = %s\n", envPrefix, strings.ToUpper(k), defaultValue)
        }

        fmt.Println()
        fmt.Println("Peers preset:")
        fmt.Println()

        fmt.Printf("%s_%s_[N]_ADDRESS = string\n", envPrefix, strings.ToUpper(cfgPeers))
        fmt.Printf("%s_%s_[N]_WEIGHT = 0..1 (float)\n", envPrefix, strings.ToUpper(cfgPeers))

        os.Exit(0)
    case versionFlag != nil && *versionFlag:
        fmt.Printf("FrostFS S3 Gateway\nVersion: %s\nGoVersion: %s\n", version.Version, runtime.Version())
        os.Exit(0)
    }

    if err := readInConfig(v); err != nil {
        panic(err)
    }

    return v
}

func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {
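The appCfg wrapper above guards the viper instance with an RWMutex so a SIGHUP-triggered reload can swap in a freshly built configuration while request paths keep reading through config(). A minimal sketch of that pattern with generic names (hotConfig is not the gateway's type):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/spf13/viper"
)

// hotConfig follows the appCfg pattern: readers take an RLock via config(),
// and a reload builds a brand-new viper instance and swaps it in under the
// write lock, so long-lived goroutines never see a half-updated configuration.
type hotConfig struct {
	mu       sync.RWMutex
	settings *viper.Viper
}

func (c *hotConfig) config() *viper.Viper {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.settings
}

func (c *hotConfig) swap(v *viper.Viper) {
	c.mu.Lock()
	c.settings = v
	c.mu.Unlock()
}

func main() {
	v := viper.New()
	v.Set("prometheus.enabled", true)
	cfg := &hotConfig{settings: v}

	fmt.Println(cfg.config().GetBool("prometheus.enabled")) // true

	// On SIGHUP the gateway would rebuild defaults, flags and files into a new
	// instance (see appCfg.reload above) and swap it in atomically.
	v2 := viper.New()
	v2.Set("prometheus.enabled", false)
	cfg.swap(v2)
	fmt.Println(cfg.config().GetBool("prometheus.enabled")) // false
}
```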
@@ -1,50 +0,0 @@
package main

import (
    "os"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
    "github.com/stretchr/testify/require"
)

func TestConfigReload(t *testing.T) {
    f, err := os.CreateTemp("", "conf")
    require.NoError(t, err)
    defer func() {
        require.NoError(t, os.Remove(f.Name()))
    }()

    confData := `
pprof:
  enabled: true

frostfsid:
  contract: name.nns

resolve_order:
  - nns
`

    _, err = f.WriteString(confData)
    require.NoError(t, err)
    require.NoError(t, f.Close())

    cfg := newSettings()

    require.NoError(t, cfg.flags.Parse([]string{"--config", f.Name(), "--max_clients_count", "10"}))
    require.NoError(t, cfg.reload())

    require.True(t, cfg.config().GetBool(cfgPProfEnabled))
    require.Equal(t, "name.nns", cfg.config().GetString(cfgFrostfsIDContract))
    require.Equal(t, []string{resolver.NNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
    require.Equal(t, 10, cfg.config().GetInt(cfgMaxClientsCount))

    require.NoError(t, os.Truncate(f.Name(), 0))
    require.NoError(t, cfg.reload())

    require.False(t, cfg.config().GetBool(cfgPProfEnabled))
    require.Equal(t, "frostfsid.frostfs", cfg.config().GetString(cfgFrostfsIDContract))
    require.Equal(t, []string{resolver.DNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
    require.Equal(t, 10, cfg.config().GetInt(cfgMaxClientsCount))
}
@@ -8,9 +8,9 @@ import (

func main() {
    g, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    cfg := newSettings()
    v := newSettings()

    a := newApp(g, cfg)
    a := newApp(g, v)

    go a.Serve(g)
|
|||
}
|
||||
|
||||
if reason, ok := frosterr.IsErrObjectAccessDenied(err); ok {
|
||||
if strings.Contains(reason, "limit reached") {
|
||||
return fmt.Errorf("%s: %w: %s", msg, frostfs.ErrQuotaLimitReached, reason)
|
||||
}
|
||||
return fmt.Errorf("%s: %w: %s", msg, frostfs.ErrAccessDenied, reason)
|
||||
}
|
||||
|
||||
|
|
|
@@ -8,49 +8,31 @@ import (
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
    frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    "github.com/stretchr/testify/require"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func TestHandleObjectError(t *testing.T) {
    msg := "some msg"
func TestErrorChecking(t *testing.T) {
    reason := "some reason"
    err := new(apistatus.ObjectAccessDenied)
    err.WriteReason(reason)

    t.Run("nil error", func(t *testing.T) {
        err := handleObjectError(msg, nil)
        require.Nil(t, err)
    })
    var wrappedError error

    t.Run("simple access denied", func(t *testing.T) {
        reason := "some reason"
        inputErr := new(apistatus.ObjectAccessDenied)
        inputErr.WriteReason(reason)
    if fetchedReason, ok := frosterr.IsErrObjectAccessDenied(err); ok {
        wrappedError = fmt.Errorf("%w: %s", frostfs.ErrAccessDenied, fetchedReason)
    }

        err := handleObjectError(msg, inputErr)
        require.ErrorIs(t, err, frostfs.ErrAccessDenied)
        require.Contains(t, err.Error(), reason)
        require.Contains(t, err.Error(), msg)
    })

    t.Run("access denied - quota reached", func(t *testing.T) {
        reason := "Quota limit reached"
        inputErr := new(apistatus.ObjectAccessDenied)
        inputErr.WriteReason(reason)

        err := handleObjectError(msg, inputErr)
        require.ErrorIs(t, err, frostfs.ErrQuotaLimitReached)
        require.Contains(t, err.Error(), reason)
        require.Contains(t, err.Error(), msg)
    })
    require.ErrorIs(t, wrappedError, frostfs.ErrAccessDenied)
    require.Contains(t, wrappedError.Error(), reason)
}

func TestErrorTimeoutChecking(t *testing.T) {
    t.Run("simple timeout", func(t *testing.T) {
        inputErr := errors.New("timeout")

        err := handleObjectError(msg, inputErr)
        require.ErrorIs(t, err, frostfs.ErrGatewayTimeout)
        require.Contains(t, err.Error(), inputErr.Error())
        require.Contains(t, err.Error(), msg)
        require.True(t, frosterr.IsTimeoutError(errors.New("timeout")))
    })

    t.Run("deadline exceeded", func(t *testing.T) {

@@ -58,35 +40,11 @@ func TestHandleObjectError(t *testing.T) {
        defer cancel()
        <-ctx.Done()

        err := handleObjectError(msg, ctx.Err())
        require.ErrorIs(t, err, frostfs.ErrGatewayTimeout)
        require.Contains(t, err.Error(), ctx.Err().Error())
        require.Contains(t, err.Error(), msg)
        require.True(t, frosterr.IsTimeoutError(ctx.Err()))
    })

    t.Run("grpc deadline exceeded", func(t *testing.T) {
        inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))

        err := handleObjectError(msg, inputErr)
        require.ErrorIs(t, err, frostfs.ErrGatewayTimeout)
        require.Contains(t, err.Error(), inputErr.Error())
        require.Contains(t, err.Error(), msg)
    })

    t.Run("global domain already", func(t *testing.T) {
        inputErr := errors.New("global domain is already taken")

        err := handleObjectError(msg, inputErr)
        require.ErrorIs(t, err, frostfs.ErrGlobalDomainIsAlreadyTaken)
        require.Contains(t, err.Error(), inputErr.Error())
        require.Contains(t, err.Error(), msg)
    })

    t.Run("unknown error", func(t *testing.T) {
        inputErr := errors.New("unknown error")

        err := handleObjectError(msg, inputErr)
        require.ErrorIs(t, err, inputErr)
        require.Contains(t, err.Error(), msg)
        err := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
        require.True(t, frosterr.IsTimeoutError(err))
    })
}
|
|||
FailedToPutTombstones = "failed to put tombstones"
|
||||
WarnDomainContainsPort = "the domain contains a port, domain skipped"
|
||||
CouldntCacheNetmap = "couldn't cache netmap"
|
||||
BucketSettingsNotFoundUseDefaults = "bucket settings not found, use defaults"
|
||||
)
|
||||
|
|