forked from TrueCloudLab/frostfs-s3-gw
[#667] Use separate copies numbers for system containers
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
parent 42d6fc3fc6
commit 949fc0b484
15 changed files with 311 additions and 125 deletions
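
For orientation, a minimal sketch of the options this change introduces, assembled from the configuration keys added in the diff below (`cors.copies_numbers`, `lifecycle.copies_numbers`). The values shown are the documented defaults; `[0]` or an empty list means the object follows the container's placement policy:

```yaml
# Copies numbers used when storing the bucket CORS configuration object.
cors:
  copies_numbers: [ 0 ]

# Copies numbers used when storing the bucket lifecycle configuration object.
lifecycle:
  copies_numbers: [ 0 ]
```

The matching environment variables added below are `S3_GW_CORS_COPIES_NUMBERS` and `S3_GW_LIFECYCLE_COPIES_NUMBERS`.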
@@ -6,6 +6,7 @@ This document outlines major changes between releases.

### Fixed
- Number of bucket tags increased to 50 (#613)
- Use own copies number for CORS and Lifecycle containers (#667)

## [0.32.13] - 2025-03-10

@@ -43,6 +43,8 @@ type (
RetryStrategy() RetryStrategy
TLSTerminationHeader() string
ListingKeepaliveThrottle() time.Duration
CORSCopiesNumbers() []uint32
LifecycleCopiesNumbers() []uint32
}

FrostFSID interface {

@@ -17,7 +17,7 @@ func TestHandler_ListBucketsHandler(t *testing.T) {
const defaultConstraint = "default"

region := "us-west-1"
hc := prepareWithoutCORSHandlerContext(t)
hc := prepareWithoutContainersHandlerContext(t, true, true)
hc.config.putLocationConstraint(region)

props := []Bucket{

@@ -61,16 +61,11 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
}

p := &layer.PutCORSParams{
BktInfo: bktInfo,
Reader: r.Body,
NewDecoder: h.cfg.NewXMLDecoder,
UserAgent: r.UserAgent(),
}

p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
BktInfo: bktInfo,
Reader: r.Body,
NewDecoder: h.cfg.NewXMLDecoder,
UserAgent: r.UserAgent(),
CopiesNumbers: h.cfg.CORSCopiesNumbers(),
}

if err = h.obj.PutBucketCORS(ctx, p); err != nil {

@@ -1,6 +1,8 @@
package handler

import (
"crypto/md5"
"encoding/base64"
"encoding/xml"
"net/http"
"net/http/httptest"

@@ -364,6 +366,30 @@ func addCORSToTree(hc *handlerContext, cors string, bkt *data.BucketInfo, corsCn
require.NoError(hc.t, err)
}

func TestPutBucketCORSCopiesNumbers(t *testing.T) {
hc := prepareHandlerContext(t)

bktName := "bucket-cors"
createBucket(hc, bktName)

cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{{
AllowedHeaders: []string{"*"},
AllowedMethods: []string{"GET"},
AllowedOrigins: []string{"*"},
}},
}

hc.config.corsCopiesNumbers = []uint32{1}
hc.config.copiesNumbers = map[string][]uint32{"default": {2}}

putBucketCORSConfiguration(hc, bktName, cfg, map[string]string{"X-Amz-Meta-Frostfs-Copies-Number": "3"}, true)

objs := hc.tp.Objects()
require.Len(t, objs, 1)
require.EqualValues(t, hc.config.corsCopiesNumbers, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
}

func requireEqualCORS(t *testing.T, expected string, actual string) {
expectedCORS := &data.CORSConfiguration{}
err := xml.NewDecoder(strings.NewReader(expected)).Decode(expectedCORS)

@@ -398,3 +424,28 @@ func getBucketCORS(hc *handlerContext, bktName string) *httptest.ResponseRecorde
assertStatus(hc.t, w, http.StatusOK)
return w
}

func putBucketCORSConfiguration(hc *handlerContext, bktName string, cfg *data.CORSConfiguration, headers map[string]string, addMD5 bool) {
w := putBucketCORSConfigurationBase(hc, bktName, cfg, headers, addMD5)
assertStatus(hc.t, w, http.StatusOK)
}

func putBucketCORSConfigurationBase(hc *handlerContext, bktName string, cfg *data.CORSConfiguration, headers map[string]string, addMD5 bool) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", cfg)

for k, v := range headers {
r.Header.Set(k, v)
}

if addMD5 {
rawBody, err := xml.Marshal(cfg)
require.NoError(hc.t, err)

hash := md5.New()
hash.Write(rawBody)
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
}

hc.Handler().PutBucketCorsHandler(w, r)
return w
}

@@ -81,6 +81,8 @@ type configMock struct {
placementPolicies map[string]netmap.PlacementPolicy
copiesNumbers map[string][]uint32
defaultCopiesNumbers []uint32
corsCopiesNumbers []uint32
lifecycleCopiesNumbers []uint32
bypassContentEncodingInChunks bool
md5Enabled bool
tlsTerminationHeader string

@@ -161,13 +163,22 @@ func (c *configMock) ListingKeepaliveThrottle() time.Duration {
return 0
}

func (c *configMock) CORSCopiesNumbers() []uint32 {
return c.corsCopiesNumbers
}

func (c *configMock) LifecycleCopiesNumbers() []uint32 {
return c.lifecycleCopiesNumbers
}

func (c *configMock) putLocationConstraint(constraint string) {
c.placementPolicies[constraint] = c.defaultPolicy
}

type handlerConfig struct {
cacheCfg *layer.CachesConfig
withoutCORS bool
cacheCfg *layer.CachesConfig
withoutCORS bool
withoutLifecycle bool
}

func prepareHandlerContext(t *testing.T) *handlerContext {

@@ -180,11 +191,12 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
}
}

func prepareWithoutCORSHandlerContext(t *testing.T) *handlerContext {
func prepareWithoutContainersHandlerContext(t *testing.T, cors, lifecycle bool) *handlerContext {
log := zaptest.NewLogger(t)
hc, err := prepareHandlerContextBase(&handlerConfig{
cacheCfg: layer.DefaultCachesConfigs(log),
withoutCORS: true,
cacheCfg: layer.DefaultCachesConfigs(log),
withoutCORS: cors,
withoutLifecycle: lifecycle,
}, log)
require.NoError(t, err)
return &handlerContext{

@@ -247,12 +259,18 @@ func prepareHandlerContextBase(config *handlerConfig, log *zap.Logger) (*handler
}

if !config.withoutCORS {
layerCfg.CORSCnrInfo, err = createCORSContainer(key, tp)
layerCfg.CORSCnrInfo, err = createContainer(key, tp, "cors")
if err != nil {
return nil, err
}
}

if !config.withoutLifecycle {
if layerCfg.LifecycleCnrInfo, err = createContainer(key, tp, "lifecycle"); err != nil {
return nil, err
}
}

var pp netmap.PlacementPolicy
err = pp.DecodeString("REP 1")
if err != nil {

@@ -296,14 +314,13 @@ func prepareHandlerContextBase(config *handlerConfig, log *zap.Logger) (*handler
return hc, nil
}

func createCORSContainer(key *keys.PrivateKey, tp *layer.TestFrostFS) (*data.BucketInfo, error) {
func createContainer(key *keys.PrivateKey, tp *layer.TestFrostFS, bktName string) (*data.BucketInfo, error) {
bearerToken := bearertest.Token()
err := bearerToken.Sign(key.PrivateKey)
if err != nil {
return nil, err
}

bktName := "cors"
res, err := tp.CreateContainer(middleware.SetBox(context.Background(), &middleware.Box{AccessBox: &accessbox.Box{
Gate: &accessbox.GateData{
BearerToken: &bearerToken,

@@ -106,14 +106,17 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
}

params := &layer.PutBucketLifecycleParams{
BktInfo: bktInfo,
LifecycleCfg: cfg,
BktInfo: bktInfo,
LifecycleCfg: cfg,
CopiesNumbers: h.cfg.LifecycleCopiesNumbers(),
}

params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
if h.obj.LifecycleContainerInfo() == nil {
params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
}
}

if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {

@@ -521,6 +521,58 @@ func TestPutBucketLifecycleInvalidXML(t *testing.T) {
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMalformedXML))
}

func TestPutBucketLifecycleCopiesNumbers(t *testing.T) {
t.Run("with lifecycle container", func(t *testing.T) {
hc := prepareHandlerContext(t)

bktName := "bucket-lifecycle"
createBucket(hc, bktName)

cfg := &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{Days: ptr(21)},
}},
}

hc.config.lifecycleCopiesNumbers = []uint32{1}
hc.config.copiesNumbers = map[string][]uint32{"default": {2}}

putBucketLifecycleConfiguration(hc, bktName, cfg, map[string]string{"X-Amz-Meta-Frostfs-Copies-Number": "3"}, true)

objs := hc.tp.Objects()
require.Len(t, objs, 1)
require.EqualValues(t, hc.config.lifecycleCopiesNumbers, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
})

t.Run("without lifecycle container", func(t *testing.T) {
hc := prepareWithoutContainersHandlerContext(t, false, true)

bktName := "bucket-lifecycle"
createBucket(hc, bktName)

cfg := &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{Days: ptr(21)},
}},
}

hc.config.lifecycleCopiesNumbers = []uint32{1}
hc.config.copiesNumbers = map[string][]uint32{"default": {2}}

putBucketLifecycleConfiguration(hc, bktName, cfg, nil, true)
objs := hc.tp.Objects()
require.Len(t, objs, 1)
require.EqualValues(t, []uint32{2}, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))

putBucketLifecycleConfiguration(hc, bktName, cfg, map[string]string{"X-Amz-Meta-Frostfs-Copies-Number": "3"}, true)
objs = hc.tp.Objects()
require.Len(t, objs, 1)
require.EqualValues(t, []uint32{3}, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
})
}

func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, headers map[string]string, addMD5 bool) {
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg, headers, addMD5)
assertStatus(hc.t, w, http.StatusOK)

@@ -1054,3 +1054,7 @@ func (n *Layer) GetNetworkInfo(ctx context.Context) (netmap.NetworkInfo, error)

return networkInfo, nil
}

func (n *Layer) LifecycleContainerInfo() *data.BucketInfo {
return n.lifecycleCnrInfo
}

@@ -143,6 +143,8 @@ type (
listingKeepaliveThrottle time.Duration
removeOnReplace bool
removeOnReplaceTimeout time.Duration
corsCopiesNumbers []uint32
lifecycleCopiesNumbers []uint32
}

maxClientsConfig struct {

@@ -379,6 +381,8 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
listingKeepaliveThrottle := v.GetDuration(cfgKludgeListingKeepAliveThrottle)
removeOnReplace := v.GetBool(cfgRemoveOnReplaceEnabled)
removeOnReplaceTimeout := fetchRemoveOnReplaceTimeout(v)
corsCopiesNumbers := fetchCopiesNumbers(log, v, cfgCORSCopiesNumbers)
lifecycleCopiesNumbers := fetchCopiesNumbers(log, v, cfgLifecycleCopiesNumbers)

s.mu.Lock()
defer s.mu.Unlock()

@@ -415,6 +419,8 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
s.listingKeepaliveThrottle = listingKeepaliveThrottle
s.removeOnReplace = removeOnReplace
s.removeOnReplaceTimeout = removeOnReplaceTimeout
s.corsCopiesNumbers = corsCopiesNumbers
s.lifecycleCopiesNumbers = lifecycleCopiesNumbers
}

func (s *appSettings) prepareVHSNamespaces(v *viper.Viper, log *zap.Logger, defaultNamespaces []string) map[string]bool {

@@ -676,6 +682,18 @@ func (s *appSettings) RemoveOnReplaceQueue() int {
return s.removeOnReplaceQueue
}

func (s *appSettings) CORSCopiesNumbers() []uint32 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.corsCopiesNumbers
}

func (s *appSettings) LifecycleCopiesNumbers() []uint32 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lifecycleCopiesNumbers
}

func (a *App) initAPI(ctx context.Context) {
a.initLayer(ctx)
a.initHandler()

@@ -166,7 +166,11 @@ const (
cfgCopiesNumbers = "placement_policy.copies_numbers"

// CORS.
cfgDefaultMaxAge = "cors.default_max_age"
cfgDefaultMaxAge = "cors.default_max_age"
cfgCORSCopiesNumbers = "cors.copies_numbers"

// Lifecycle.
cfgLifecycleCopiesNumbers = "lifecycle.copies_numbers"

// MaxClients.
cfgMaxClientsCount = "max_clients_count"

@@ -631,15 +635,16 @@ func readRegionMap(filePath string) (map[string]string, error) {
return regionMap, nil
}

func fetchDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) []uint32 {
unparsed := v.GetStringSlice(cfgSetCopiesNumber)
func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper, param string) []uint32 {
unparsed := v.GetStringSlice(param)
result := make([]uint32, len(unparsed))
for i := range unparsed {
parsedValue, err := strconv.ParseUint(unparsed[i], 10, 32)
if err != nil {
l.Warn(logs.FailedToParseDefaultCopiesNumbers,
l.Warn(logs.FailedToParseCopiesNumbers,
zap.Strings("copies numbers", unparsed),
zap.Uint32s("default", defaultCopiesNumbers),
zap.String("parameter", param),
zap.Error(err),
logs.TagField(logs.TagApp),
)

@@ -683,7 +688,7 @@ func fetchKludgeProfiles(v *viper.Viper) map[string]*KludgeParams {
return kludgeProfiles
}

func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
func fetchPlacementPolicyCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
copiesNums := make(map[string][]uint32)
for i := 0; ; i++ {
key := cfgCopiesNumbers + "." + strconv.Itoa(i) + "."

@@ -698,7 +703,7 @@ func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
for j := range vector {
parsedValue, err := strconv.ParseUint(vector[j], 10, 32)
if err != nil {
l.Warn(logs.FailedToParseCopiesNumbers,
l.Warn(logs.FailedToParsePlacementPolicyCopiesNumbers,
zap.String("location", constraint),
zap.Strings("copies numbers", vector), zap.Error(err),
logs.TagField(logs.TagApp))

@@ -733,8 +738,8 @@ func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []s
defaultNSRegionMap := fetchRegionMappingPolicies(l, v)
defaultNSRegionMap[defaultConstraintName] = fetchDefaultPolicy(l, v)

defaultNSCopiesNumbers := fetchCopiesNumbers(l, v)
defaultNSCopiesNumbers[defaultConstraintName] = fetchDefaultCopiesNumbers(l, v)
defaultNSCopiesNumbers := fetchPlacementPolicyCopiesNumbers(l, v)
defaultNSCopiesNumbers[defaultConstraintName] = fetchCopiesNumbers(l, v, cfgSetCopiesNumber)

defaultNSValue := Namespace{
LocationConstraints: defaultNSRegionMap,

@@ -159,6 +159,14 @@ S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_1_VECTOR=2 3 4
# CORS
# value of Access-Control-Max-Age header if this value is not set in a rule. Has an int type.
S3_GW_CORS_DEFAULT_MAX_AGE=600
# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
# `[0]` or empty list means that object will be processed according to the container's placement policy.
S3_GW_CORS_COPIES_NUMBERS=0

# Lifecycle configuration
# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
# `[0]` or empty list means that object will be processed according to the container's placement policy.
S3_GW_LIFECYCLE_COPIES_NUMBERS=0

# Parameters of requests to FrostFS
# Numbers of the object copies (for each replica, syntax the same as for `S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_0_VECTOR` above)

@@ -196,9 +196,18 @@ placement_policy:
- 3

# CORS
# value of Access-Control-Max-Age header if this value is not set in a rule. Has an int type.
cors:
# value of Access-Control-Max-Age header if this value is not set in a rule. Has an int type.
default_max_age: 600
# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
# `[0]` or empty list means that object will be processed according to the container's placement policy.
copies_numbers: [ 0 ]

# Lifecycle configuration
lifecycle:
# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
# `[0]` or empty list means that object will be processed according to the container's placement policy.
copies_numbers: [ 0 ]

# Parameters of requests to FrostFS
frostfs:

@@ -115,8 +115,10 @@ $ frostfs-s3-gw --config your-config.yaml
### Multiple configs

You can use several config files when running application. It allows you to split configuration into parts.
For example, you can use separate yaml file for pprof and prometheus section in config (see [config examples](../config)).
You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs using `--config-dir` flag.
For example, you can use separate yaml file for pprof and prometheus section in config (
see [config examples](../config)).
You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs
using `--config-dir` flag.
Also, you can combine these flags:

```shell

@@ -181,6 +183,7 @@ There are some custom types used for brevity:
| `http_logging` | [HTTP Request logger configuration](#http_logging-section) |
| `cache` | [Cache configuration](#cache-section) |
| `cors` | [CORS configuration](#cors-section) |
| `lifecycle` | [Lifecycle configuration](#lifecycle-section) |
| `pprof` | [Pprof configuration](#pprof-section) |
| `prometheus` | [Prometheus configuration](#prometheus-section) |
| `tracing` | [Tracing configuration](#tracing-section) |

@@ -204,9 +207,9 @@ There are some custom types used for brevity:

```yaml
listen_domains:
- s3dev.frostfs.devenv
- s3dev.<wildcard>.frostfs.devenv
- s3dev2.frostfs.devenv
- s3dev.frostfs.devenv
- s3dev.<wildcard>.frostfs.devenv
- s3dev2.frostfs.devenv

rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:

@@ -226,8 +229,8 @@ max_clients_count: 100
max_clients_deadline: 30s

allowed_access_key_id_prefixes:
- Ck9BHsgKcnwfCTUSFm6pxhoNS4cBqgN2NQ8zVgPjqZDX
- 3stjWenX15YwYzczMr88gy3CQr4NYFBQ8P7keGzH5QFn
- Ck9BHsgKcnwfCTUSFm6pxhoNS4cBqgN2NQ8zVgPjqZDX
- 3stjWenX15YwYzczMr88gy3CQr4NYFBQ8P7keGzH5QFn

reconnect_interval: 1m

@@ -257,16 +260,16 @@ source_ip_header: "Source-Ip"

```yaml
wallet:
path: /path/to/wallet.json # Path to wallet
passphrase: "" # Passphrase to decrypt wallet.
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
path: /path/to/wallet.json # Path to wallet
passphrase: "" # Passphrase to decrypt wallet.
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
```

| Parameter | Type | Default value | Description |
|--------------|----------|---------------|---------------------------------------------------------------------------|
| `path` | `string` | | Path to wallet |
| `passphrase` | `string` | | Passphrase to decrypt wallet. |
| `address` | `string` | | Account address to get from wallet. If omitted default one will be used. |
| Parameter | Type | Default value | Description |
|--------------|----------|---------------|--------------------------------------------------------------------------|
| `path` | `string` | | Path to wallet |
| `passphrase` | `string` | | Passphrase to decrypt wallet. |
| `address` | `string` | | Account address to get from wallet. If omitted default one will be used. |

### `peers` section

@@ -299,7 +302,6 @@ peers:
| `priority` | `int` | `1` | It allows to group nodes and don't switch group until all nodes with the same priority will be unhealthy. The lower the value, the higher the priority. |
| `weight` | `float` | `1` | Weight of node in the group with the same priority. Distribute requests to nodes proportionally to these values. |

### `placement_policy` section

```yaml

@@ -324,14 +326,15 @@ File for `region_mapping` must contain something like this:

```json
{
"rep-3": "REP 3",
"complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X",
"example-json-policy": "{\"replicas\":[{\"count\":3,\"selector\":\"SelASD0\"}],\"container_backup_factor\":3,\"selectors\":[{\"name\":\"SelASD0\",\"count\":3,\"filter\":\"*\"}],\"filters\":[]}"
"rep-3": "REP 3",
"complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X",
"example-json-policy": "{\"replicas\":[{\"count\":3,\"selector\":\"SelASD0\"}],\"container_backup_factor\":3,\"selectors\":[{\"name\":\"SelASD0\",\"count\":3,\"filter\":\"*\"}],\"filters\":[]}"
}
```

**Note:** on SIGHUP reload policies will be updated only if both parameters are valid.
So if you change `default` to some valid value and set invalid path in `region_mapping` the `default` value won't be changed.
So if you change `default` to some valid value and set invalid path in `region_mapping` the `default` value won't be
changed.

#### `copies_numbers` subsection

@@ -348,7 +351,6 @@ So if you change `default` to some valid value and set invalid path in `region_m
| `location_constraint` | `string` | no | | Location constraint text label. |
| `vector` | `[]int` | no | | Array of copies numbers corresponding to the constraint. |
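
For orientation (not part of the diff): judging from the `location_constraint` and `vector` parameters above and the `S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_*` variables shown in the env config earlier in this commit, the subsection presumably looks something like the sketch below; the constraint labels are hypothetical.

```yaml
placement_policy:
  copies_numbers:
    - location_constraint: "default"   # hypothetical label
      vector:
        - 1
    - location_constraint: "complex"   # hypothetical label
      vector:
        - 2
        - 3
        - 4
```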

### `server` section

You can specify several listeners for server. For example, for `http` and `https`.

@@ -386,12 +388,12 @@ logger:
thereafter: 100
interval: 1s
tags:
- name: app
level: info
- name: datapath
- name: external_blockchain
- name: external_storage_tree
- name: external_storage
- name: app
level: info
- name: datapath
- name: external_blockchain
- name: external_storage_tree
- name: external_storage
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -416,10 +418,10 @@ tags:
level: info
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |

### Tag values

@@ -429,10 +431,10 @@ tags:
* `external_storage` - external interaction with storage node (enabled by default).
* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).

### `http_logging` section

Could be enabled only in builds with `loghttp` build tag. To build with `loghttp` tag, pass `GOFLAGS` var to `make`:

```bash
make GOFLAGS="-tags=loghttp" [target]
```

@@ -454,7 +456,6 @@ http_logging:
| `gzip` | `bool` | yes | `false` | Whether to enable Gzip compression to backup log files. |
| `destination` | `string` | yes | `stdout` | Specify path for log output. Accepts log file path, or "stdout" and "stderr" reserved words to print in output streams. File and folders are created if necessary. |

### `cache` section

```yaml

@@ -539,11 +540,24 @@ size: 100
```yaml
cors:
default_max_age: 600
copies_numbers: [ 0 ]
```

| Parameter | Type | Default value | Description |
|-------------------|-------|---------------|------------------------------------------------------|
| `default_max_age` | `int` | `600` | Value of `Access-Control-Max-Age` header in seconds. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `default_max_age` | `int` | no | `600` | Value of `Access-Control-Max-Age` header in seconds. |
| `copies_numbers` | `[]uint32` | yes | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy. |

### `lifecycle` section

```yaml
lifecycle:
copies_numbers: [ 0 ]
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `copies_numbers` | `[]uint32` | yes | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy. |

# `pprof` section

@@ -594,7 +608,7 @@ tracing:
```

| Parameter | Type | SIGHUP reload | Default value | Description |
| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|--------------|----------------------------------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
| `exporter` | `string` | yes | | Type of tracing exporter. |
| `endpoint` | `string` | yes | | Address that service listener binds to. |

@@ -611,29 +625,30 @@ tracing:
value: value
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |

# `frostfs` section

Contains parameters of requests to FrostFS.

The `set_copies_number` value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`)
The `set_copies_number` value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated
numbers: `1,2,3`)
header for `PutObject`, `CopyObject`, `CreateMultipartUpload`.

```yaml
frostfs:
set_copies_number: [0]
set_copies_number: [ 0 ]
client_cut: false
buffer_max_size_for_put: 1048576 # 1mb
tree_pool_max_attempts: 0
graceful_close_on_switch_timeout: 10s
tombstone:
lifetime: 10
members_size: 100
worker_pool_size: 100
lifetime: 10
members_size: 100
worker_pool_size: 100
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -667,7 +682,8 @@ resolve_bucket:

# `kludge` section

Workarounds for non-standard use cases. In `profiles` subsection has the ability to override behavior for specific user agent.
Workarounds for non-standard use cases. In `profiles` subsection has the ability to override behavior for specific user
agent.

```yaml
kludge:

@@ -676,11 +692,11 @@ kludge:
default_namespaces: [ "", "root" ]
listing_keepalive_throttle: 10s
profile:
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -695,11 +711,11 @@ kludge:

````yaml
profile:
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
````

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -708,9 +724,8 @@ kludge:
| `use_default_xmlns` | `bool` | yes | | Enable using default xml namespace for profile. |
| `bypass_content_encoding_check_in_chunks` | `bool` | yes | | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |

# `runtime` section

Contains runtime parameters.

```yaml

@@ -723,19 +738,20 @@ runtime:
| `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |

# `features` section

Contains parameters for enabling features.

```yaml
features:
policy:
deny_by_default: false
md5:
enabled: false
tree_pool_netmap_support: true
remove_on_replace:
enabled: false
timeout: 30s
queue: 10000
policy:
deny_by_default: false
md5:
enabled: false
tree_pool_netmap_support: true
remove_on_replace:
enabled: false
timeout: 30s
queue: 10000
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -748,6 +764,7 @@ features:
| `remove_on_replace.queue` | `int` | false | `10000` | Buffer size for objects to delete. If buffer is full creation new unversioned object won't remove old one. Lifecycler will do that. |

# `web` section

Contains web server configuration parameters.

```yaml

@@ -760,7 +777,7 @@ web:

| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `read_timeout` | `duration` | no | `0` | The maximum duration for reading the entire request, including the body. A zero or negative value means there will be no timeout. |
| `read_timeout` | `duration` | no | `0` | The maximum duration for reading the entire request, including the body. A zero or negative value means there will be no timeout. |
| `read_header_timeout` | `duration` | no | `30s` | The amount of time allowed to read request headers. If `read_header_timeout` is zero, the value of `read_timeout` is used. If both are zero, there is no timeout. |
| `write_timeout` | `duration` | no | `0` | The maximum duration before timing out writes of the response. A zero or negative value means there will be no timeout. |
| `idle_timeout` | `duration` | no | `30s` | The maximum amount of time to wait for the next request when keep-alives are enabled. If `idle_timeout` is zero, the value of `read_timeout` is used. If both are zero, there is no timeout. |

@@ -773,7 +790,7 @@ FrostfsID contract configuration. To enable this functionality the `rpc_endpoint
frostfsid:
contract: frostfsid.frostfs
validation:
enabled: false
enabled: false
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -836,8 +853,12 @@ To override config values for default namespaces use namespace names that are pr
"test": "{\"replicas\":[{\"count\":1,\"selector\":\"\"}],\"containerBackupFactor\":0,\"selectors\":[],\"filters\":[],\"unique\":false}"
},
"copies_numbers": {
"default": [ 0 ],
"test": [ 1 ]
"default": [
0
],
"test": [
1
]
}
}
}

@@ -884,12 +905,12 @@ Configuration of virtual hosted addressing style.

```yaml
vhs:
enabled: false
vhs_header: X-Frostfs-S3-VHS
servername_header: X-Frostfs-Servername
namespaces:
"ns1": false
"ns2": true
enabled: false
vhs_header: X-Frostfs-S3-VHS
servername_header: X-Frostfs-Servername
namespaces:
"ns1": false
"ns2": true
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -905,15 +926,15 @@ Configuration of multinet support.

```yaml
multinet:
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@@ -71,8 +71,8 @@ const (
FailedToReadRegionMapFilePolicies = "failed to read region map file, policies will be empty"
DefaultLocationConstraintCantBeOverriden = "'default' location constraint can't be overriden by custom policy, use 'placement_policy.default'"
FailedToParseLocationConstraint = "failed to parse location constraint, it cannot be used"
FailedToParseDefaultCopiesNumbers = "failed to parse 'default' copies numbers, default one will be used"
FailedToParseCopiesNumbers = "failed to parse copies numbers, skip"
FailedToParseCopiesNumbers = "failed to parse copies numbers, default one will be used"
FailedToParsePlacementPolicyCopiesNumbers = "failed to parse placement policy copies numbers, skip"
FailedToInitializeTracing = "failed to initialize tracing"
DefaultNamespacesCannotBeEmpty = "default namespaces cannot be empty, defaults will be used"
FailedToParseNamespacesConfig = "failed to unmarshal namespaces config"