[#667] Use separate copies numbers for system containers

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
Denis Kirillov 2025-03-26 16:32:06 +03:00
parent 42d6fc3fc6
commit 949fc0b484
15 changed files with 311 additions and 125 deletions

View file

@@ -6,6 +6,7 @@ This document outlines major changes between releases.
 ### Fixed
 - Number of bucket tags increased to 50 (#613)
+- Use own copies number for CORS and Lifecycle containers (#667)

 ## [0.32.13] - 2025-03-10

View file

@@ -43,6 +43,8 @@ type (
        RetryStrategy() RetryStrategy
        TLSTerminationHeader() string
        ListingKeepaliveThrottle() time.Duration
+       CORSCopiesNumbers() []uint32
+       LifecycleCopiesNumbers() []uint32
    }

    FrostFSID interface {

View file

@@ -17,7 +17,7 @@ func TestHandler_ListBucketsHandler(t *testing.T) {
    const defaultConstraint = "default"
    region := "us-west-1"
-   hc := prepareWithoutCORSHandlerContext(t)
+   hc := prepareWithoutContainersHandlerContext(t, true, true)
    hc.config.putLocationConstraint(region)

    props := []Bucket{

View file

@@ -65,12 +65,7 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
        Reader:        r.Body,
        NewDecoder:    h.cfg.NewXMLDecoder,
        UserAgent:     r.UserAgent(),
+       CopiesNumbers: h.cfg.CORSCopiesNumbers(),
    }
-   p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-   if err != nil {
-       h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
-       return
-   }

    if err = h.obj.PutBucketCORS(ctx, p); err != nil {

View file

@ -1,6 +1,8 @@
package handler package handler
import ( import (
"crypto/md5"
"encoding/base64"
"encoding/xml" "encoding/xml"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@@ -364,6 +366,30 @@ func addCORSToTree(hc *handlerContext, cors string, bkt *data.BucketInfo, corsCn
    require.NoError(hc.t, err)
}

+func TestPutBucketCORSCopiesNumbers(t *testing.T) {
+   hc := prepareHandlerContext(t)
+
+   bktName := "bucket-cors"
+   createBucket(hc, bktName)
+
+   cfg := &data.CORSConfiguration{
+       CORSRules: []data.CORSRule{{
+           AllowedHeaders: []string{"*"},
+           AllowedMethods: []string{"GET"},
+           AllowedOrigins: []string{"*"},
+       }},
+   }
+
+   hc.config.corsCopiesNumbers = []uint32{1}
+   hc.config.copiesNumbers = map[string][]uint32{"default": {2}}
+   putBucketCORSConfiguration(hc, bktName, cfg, map[string]string{"X-Amz-Meta-Frostfs-Copies-Number": "3"}, true)
+
+   objs := hc.tp.Objects()
+   require.Len(t, objs, 1)
+   require.EqualValues(t, hc.config.corsCopiesNumbers, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
+}
+
 func requireEqualCORS(t *testing.T, expected string, actual string) {
    expectedCORS := &data.CORSConfiguration{}
    err := xml.NewDecoder(strings.NewReader(expected)).Decode(expectedCORS)
@@ -398,3 +424,28 @@ func getBucketCORS(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
    assertStatus(hc.t, w, http.StatusOK)
    return w
}
+
+func putBucketCORSConfiguration(hc *handlerContext, bktName string, cfg *data.CORSConfiguration, headers map[string]string, addMD5 bool) {
+   w := putBucketCORSConfigurationBase(hc, bktName, cfg, headers, addMD5)
+   assertStatus(hc.t, w, http.StatusOK)
+}
+
+func putBucketCORSConfigurationBase(hc *handlerContext, bktName string, cfg *data.CORSConfiguration, headers map[string]string, addMD5 bool) *httptest.ResponseRecorder {
+   w, r := prepareTestRequest(hc, bktName, "", cfg)
+   for k, v := range headers {
+       r.Header.Set(k, v)
+   }
+
+   if addMD5 {
+       rawBody, err := xml.Marshal(cfg)
+       require.NoError(hc.t, err)
+
+       hash := md5.New()
+       hash.Write(rawBody)
+       r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
+   }
+
+   hc.Handler().PutBucketCorsHandler(w, r)
+   return w
+}

View file

@@ -81,6 +81,8 @@ type configMock struct {
    placementPolicies             map[string]netmap.PlacementPolicy
    copiesNumbers                 map[string][]uint32
    defaultCopiesNumbers          []uint32
+   corsCopiesNumbers             []uint32
+   lifecycleCopiesNumbers        []uint32
    bypassContentEncodingInChunks bool
    md5Enabled                    bool
    tlsTerminationHeader          string
@@ -161,6 +163,14 @@ func (c *configMock) ListingKeepaliveThrottle() time.Duration {
    return 0
}

+func (c *configMock) CORSCopiesNumbers() []uint32 {
+   return c.corsCopiesNumbers
+}
+
+func (c *configMock) LifecycleCopiesNumbers() []uint32 {
+   return c.lifecycleCopiesNumbers
+}
+
 func (c *configMock) putLocationConstraint(constraint string) {
    c.placementPolicies[constraint] = c.defaultPolicy
}
@@ -168,6 +178,7 @@ func (c *configMock) putLocationConstraint(constraint string) {
 type handlerConfig struct {
    cacheCfg         *layer.CachesConfig
    withoutCORS      bool
+   withoutLifecycle bool
}

 func prepareHandlerContext(t *testing.T) *handlerContext {
@@ -180,11 +191,12 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
    }
}

-func prepareWithoutCORSHandlerContext(t *testing.T) *handlerContext {
+func prepareWithoutContainersHandlerContext(t *testing.T, cors, lifecycle bool) *handlerContext {
    log := zaptest.NewLogger(t)
    hc, err := prepareHandlerContextBase(&handlerConfig{
        cacheCfg:         layer.DefaultCachesConfigs(log),
-       withoutCORS:      true,
+       withoutCORS:      cors,
+       withoutLifecycle: lifecycle,
    }, log)
    require.NoError(t, err)
    return &handlerContext{
@@ -247,12 +259,18 @@ func prepareHandlerContextBase(config *handlerConfig, log *zap.Logger) (*handler
    }

    if !config.withoutCORS {
-       layerCfg.CORSCnrInfo, err = createCORSContainer(key, tp)
+       layerCfg.CORSCnrInfo, err = createContainer(key, tp, "cors")
        if err != nil {
            return nil, err
        }
    }

+   if !config.withoutLifecycle {
+       if layerCfg.LifecycleCnrInfo, err = createContainer(key, tp, "lifecycle"); err != nil {
+           return nil, err
+       }
+   }
+
    var pp netmap.PlacementPolicy
    err = pp.DecodeString("REP 1")
    if err != nil {
@@ -296,14 +314,13 @@ func prepareHandlerContextBase(config *handlerConfig, log *zap.Logger) (*handler
    return hc, nil
}

-func createCORSContainer(key *keys.PrivateKey, tp *layer.TestFrostFS) (*data.BucketInfo, error) {
+func createContainer(key *keys.PrivateKey, tp *layer.TestFrostFS, bktName string) (*data.BucketInfo, error) {
    bearerToken := bearertest.Token()
    err := bearerToken.Sign(key.PrivateKey)
    if err != nil {
        return nil, err
    }

-   bktName := "cors"
    res, err := tp.CreateContainer(middleware.SetBox(context.Background(), &middleware.Box{AccessBox: &accessbox.Box{
        Gate: &accessbox.GateData{
            BearerToken: &bearerToken,

View file

@@ -108,13 +108,16 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
    params := &layer.PutBucketLifecycleParams{
        BktInfo:       bktInfo,
        LifecycleCfg:  cfg,
+       CopiesNumbers: h.cfg.LifecycleCopiesNumbers(),
    }

+   if h.obj.LifecycleContainerInfo() == nil {
        params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
        if err != nil {
            h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
            return
        }
+   }

    if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {
        h.logAndSendError(ctx, w, "could not put bucket lifecycle configuration", reqInfo, err)
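Taken together, the hunk above introduces a simple precedence rule: when the gateway runs with a dedicated lifecycle container, the static `lifecycle.copies_numbers` setting is used as-is; without one, copies numbers are still resolved from request metadata and the bucket's location constraint, exactly as before. A minimal illustrative sketch of that decision, not part of this commit (the function name and parameters are hypothetical):

```go
// resolveLifecycleCopies mirrors the precedence added in PutBucketLifecycleHandler:
// a dedicated lifecycle container takes the configured lifecycle.copies_numbers,
// otherwise the old per-request resolution (pickCopiesNumbers) still applies.
func resolveLifecycleCopies(
	hasLifecycleContainer bool,
	configured []uint32, // value of LifecycleCopiesNumbers() from settings
	fromRequest func() ([]uint32, error), // wraps h.pickCopiesNumbers(...)
) ([]uint32, error) {
	if hasLifecycleContainer {
		return configured, nil
	}
	return fromRequest()
}
```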

View file

@@ -521,6 +521,58 @@ func TestPutBucketLifecycleInvalidXML(t *testing.T) {
    assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMalformedXML))
}

+func TestPutBucketLifecycleCopiesNumbers(t *testing.T) {
+   t.Run("with lifecycle container", func(t *testing.T) {
+       hc := prepareHandlerContext(t)
+
+       bktName := "bucket-lifecycle"
+       createBucket(hc, bktName)
+
+       cfg := &data.LifecycleConfiguration{
+           Rules: []data.LifecycleRule{{
+               Status:     data.LifecycleStatusEnabled,
+               Expiration: &data.LifecycleExpiration{Days: ptr(21)},
+           }},
+       }
+
+       hc.config.lifecycleCopiesNumbers = []uint32{1}
+       hc.config.copiesNumbers = map[string][]uint32{"default": {2}}
+       putBucketLifecycleConfiguration(hc, bktName, cfg, map[string]string{"X-Amz-Meta-Frostfs-Copies-Number": "3"}, true)
+
+       objs := hc.tp.Objects()
+       require.Len(t, objs, 1)
+       require.EqualValues(t, hc.config.lifecycleCopiesNumbers, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
+   })
+
+   t.Run("without lifecycle container", func(t *testing.T) {
+       hc := prepareWithoutContainersHandlerContext(t, false, true)
+
+       bktName := "bucket-lifecycle"
+       createBucket(hc, bktName)
+
+       cfg := &data.LifecycleConfiguration{
+           Rules: []data.LifecycleRule{{
+               Status:     data.LifecycleStatusEnabled,
+               Expiration: &data.LifecycleExpiration{Days: ptr(21)},
+           }},
+       }
+
+       hc.config.lifecycleCopiesNumbers = []uint32{1}
+       hc.config.copiesNumbers = map[string][]uint32{"default": {2}}
+       putBucketLifecycleConfiguration(hc, bktName, cfg, nil, true)
+
+       objs := hc.tp.Objects()
+       require.Len(t, objs, 1)
+       require.EqualValues(t, []uint32{2}, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
+
+       putBucketLifecycleConfiguration(hc, bktName, cfg, map[string]string{"X-Amz-Meta-Frostfs-Copies-Number": "3"}, true)
+
+       objs = hc.tp.Objects()
+       require.Len(t, objs, 1)
+       require.EqualValues(t, []uint32{3}, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
+   })
+}
+
 func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, headers map[string]string, addMD5 bool) {
    w := putBucketLifecycleConfigurationBase(hc, bktName, cfg, headers, addMD5)
    assertStatus(hc.t, w, http.StatusOK)

View file

@@ -1054,3 +1054,7 @@ func (n *Layer) GetNetworkInfo(ctx context.Context) (netmap.NetworkInfo, error)
    return networkInfo, nil
}
+
+func (n *Layer) LifecycleContainerInfo() *data.BucketInfo {
+   return n.lifecycleCnrInfo
+}

View file

@@ -143,6 +143,8 @@ type (
        listingKeepaliveThrottle time.Duration
        removeOnReplace          bool
        removeOnReplaceTimeout   time.Duration
+       corsCopiesNumbers        []uint32
+       lifecycleCopiesNumbers   []uint32
    }

    maxClientsConfig struct {
@@ -379,6 +381,8 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
    listingKeepaliveThrottle := v.GetDuration(cfgKludgeListingKeepAliveThrottle)
    removeOnReplace := v.GetBool(cfgRemoveOnReplaceEnabled)
    removeOnReplaceTimeout := fetchRemoveOnReplaceTimeout(v)
+   corsCopiesNumbers := fetchCopiesNumbers(log, v, cfgCORSCopiesNumbers)
+   lifecycleCopiesNumbers := fetchCopiesNumbers(log, v, cfgLifecycleCopiesNumbers)

    s.mu.Lock()
    defer s.mu.Unlock()
@@ -415,6 +419,8 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
    s.listingKeepaliveThrottle = listingKeepaliveThrottle
    s.removeOnReplace = removeOnReplace
    s.removeOnReplaceTimeout = removeOnReplaceTimeout
+   s.corsCopiesNumbers = corsCopiesNumbers
+   s.lifecycleCopiesNumbers = lifecycleCopiesNumbers
}

 func (s *appSettings) prepareVHSNamespaces(v *viper.Viper, log *zap.Logger, defaultNamespaces []string) map[string]bool {
@@ -676,6 +682,18 @@ func (s *appSettings) RemoveOnReplaceQueue() int {
    return s.removeOnReplaceQueue
}

+func (s *appSettings) CORSCopiesNumbers() []uint32 {
+   s.mu.RLock()
+   defer s.mu.RUnlock()
+   return s.corsCopiesNumbers
+}
+
+func (s *appSettings) LifecycleCopiesNumbers() []uint32 {
+   s.mu.RLock()
+   defer s.mu.RUnlock()
+   return s.lifecycleCopiesNumbers
+}
+
 func (a *App) initAPI(ctx context.Context) {
    a.initLayer(ctx)
    a.initHandler()

View file

@@ -167,6 +167,10 @@ const (
    // CORS.
    cfgDefaultMaxAge = "cors.default_max_age"
+   cfgCORSCopiesNumbers = "cors.copies_numbers"
+
+   // Lifecycle.
+   cfgLifecycleCopiesNumbers = "lifecycle.copies_numbers"

    // MaxClients.
    cfgMaxClientsCount = "max_clients_count"
@@ -631,15 +635,16 @@ func readRegionMap(filePath string) (map[string]string, error) {
    return regionMap, nil
}

-func fetchDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) []uint32 {
-   unparsed := v.GetStringSlice(cfgSetCopiesNumber)
+func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper, param string) []uint32 {
+   unparsed := v.GetStringSlice(param)
    result := make([]uint32, len(unparsed))
    for i := range unparsed {
        parsedValue, err := strconv.ParseUint(unparsed[i], 10, 32)
        if err != nil {
-           l.Warn(logs.FailedToParseDefaultCopiesNumbers,
+           l.Warn(logs.FailedToParseCopiesNumbers,
                zap.Strings("copies numbers", unparsed),
                zap.Uint32s("default", defaultCopiesNumbers),
+               zap.String("parameter", param),
                zap.Error(err),
                logs.TagField(logs.TagApp),
            )
@@ -683,7 +688,7 @@ func fetchKludgeProfiles(v *viper.Viper) map[string]*KludgeParams {
    return kludgeProfiles
}

-func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
+func fetchPlacementPolicyCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
    copiesNums := make(map[string][]uint32)
    for i := 0; ; i++ {
        key := cfgCopiesNumbers + "." + strconv.Itoa(i) + "."
@@ -698,7 +703,7 @@ func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
        for j := range vector {
            parsedValue, err := strconv.ParseUint(vector[j], 10, 32)
            if err != nil {
-               l.Warn(logs.FailedToParseCopiesNumbers,
+               l.Warn(logs.FailedToParsePlacementPolicyCopiesNumbers,
                    zap.String("location", constraint),
                    zap.Strings("copies numbers", vector), zap.Error(err),
                    logs.TagField(logs.TagApp))
@@ -733,8 +738,8 @@ func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []s
    defaultNSRegionMap := fetchRegionMappingPolicies(l, v)
    defaultNSRegionMap[defaultConstraintName] = fetchDefaultPolicy(l, v)

-   defaultNSCopiesNumbers := fetchCopiesNumbers(l, v)
-   defaultNSCopiesNumbers[defaultConstraintName] = fetchDefaultCopiesNumbers(l, v)
+   defaultNSCopiesNumbers := fetchPlacementPolicyCopiesNumbers(l, v)
+   defaultNSCopiesNumbers[defaultConstraintName] = fetchCopiesNumbers(l, v, cfgSetCopiesNumber)

    defaultNSValue := Namespace{
        LocationConstraints: defaultNSRegionMap,
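For context, the renamed `fetchCopiesNumbers` in the hunks above is a parameterised string-slice-to-`[]uint32` conversion with a logged fallback; the diff only shows its error branch. A self-contained sketch of the same shape, assuming the usual viper/zap APIs (the package name and the `defaultCopiesNumbers` value here are placeholders, not the gateway's actual defaults):

```go
package settings

import (
	"strconv"

	"github.com/spf13/viper"
	"go.uber.org/zap"
)

// Placeholder fallback; the real gateway defines its own package-level default.
var defaultCopiesNumbers = []uint32{0}

// fetchCopiesNumbers reads a string-slice parameter such as "cors.copies_numbers"
// or "lifecycle.copies_numbers" and converts it to []uint32, falling back to the
// default as soon as one element fails to parse.
func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper, param string) []uint32 {
	unparsed := v.GetStringSlice(param)
	result := make([]uint32, len(unparsed))
	for i := range unparsed {
		value, err := strconv.ParseUint(unparsed[i], 10, 32)
		if err != nil {
			l.Warn("failed to parse copies numbers, default one will be used",
				zap.String("parameter", param),
				zap.Strings("copies numbers", unparsed),
				zap.Error(err))
			return defaultCopiesNumbers
		}
		result[i] = uint32(value)
	}
	return result
}
```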

View file

@@ -159,6 +159,14 @@ S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_1_VECTOR=2 3 4
 # CORS
 # value of Access-Control-Max-Age header if this value is not set in a rule. Has an int type.
 S3_GW_CORS_DEFAULT_MAX_AGE=600
+# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
+# `[0]` or empty list means that object will be processed according to the container's placement policy.
+S3_GW_CORS_COPIES_NUMBERS=0
+
+# Lifecycle configuration
+# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
+# `[0]` or empty list means that object will be processed according to the container's placement policy.
+S3_GW_LIFECYCLE_COPIES_NUMBERS=0

 # Parameters of requests to FrostFS
 # Numbers of the object copies (for each replica, syntax the same as for `S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_0_VECTOR` above)

View file

@@ -196,9 +196,18 @@ placement_policy:
     - 3

 # CORS
-# value of Access-Control-Max-Age header if this value is not set in a rule. Has an int type.
 cors:
+  # value of Access-Control-Max-Age header if this value is not set in a rule. Has an int type.
   default_max_age: 600
+  # Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
+  # `[0]` or empty list means that object will be processed according to the container's placement policy.
+  copies_numbers: [ 0 ]
+
+# Lifecycle configuration
+lifecycle:
+  # Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
+  # `[0]` or empty list means that object will be processed according to the container's placement policy.
+  copies_numbers: [ 0 ]

 # Parameters of requests to FrostFS
 frostfs:
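The comments added above document the convention that `[0]` or an empty `copies_numbers` list defers replication to the container's placement policy. A hypothetical helper, not present in the gateway code, that encodes this reading of the setting:

```go
// placementPolicyDecides reports whether a configured copies_numbers value
// (e.g. cors.copies_numbers or lifecycle.copies_numbers) leaves the number of
// object copies up to the container's placement policy, following the documented
// "[0] or empty list" convention.
func placementPolicyDecides(copies []uint32) bool {
	return len(copies) == 0 || (len(copies) == 1 && copies[0] == 0)
}
```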

View file

@@ -115,8 +115,10 @@ $ frostfs-s3-gw --config your-config.yaml
 ### Multiple configs

 You can use several config files when running application. It allows you to split configuration into parts.
-For example, you can use separate yaml file for pprof and prometheus section in config (see [config examples](../config)).
-You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs using `--config-dir` flag.
+For example, you can use separate yaml file for pprof and prometheus section in config (
+see [config examples](../config)).
+You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs
+using `--config-dir` flag.
 Also, you can combine these flags:

 ```shell
@@ -181,6 +183,7 @@ There are some custom types used for brevity:
 | `http_logging` | [HTTP Request logger configuration](#http_logging-section) |
 | `cache`        | [Cache configuration](#cache-section)                      |
 | `cors`         | [CORS configuration](#cors-section)                        |
+| `lifecycle`    | [Lifecycle configuration](#lifecycle-section)              |
 | `pprof`        | [Pprof configuration](#pprof-section)                      |
 | `prometheus`   | [Prometheus configuration](#prometheus-section)            |
 | `tracing`      | [Tracing configuration](#tracing-section)                  |
@@ -263,7 +266,7 @@ wallet:
 ```

 | Parameter    | Type     | Default value | Description                                                               |
-|--------------|----------|---------------|---------------------------------------------------------------------------|
+|--------------|----------|---------------|--------------------------------------------------------------------------|
 | `path`       | `string` |               | Path to wallet                                                            |
 | `passphrase` | `string` |               | Passphrase to decrypt wallet.                                             |
 | `address`    | `string` |               | Account address to get from wallet. If omitted default one will be used.  |
@@ -299,7 +302,6 @@ peers:
 | `priority` | `int`   | `1` | It allows to group nodes and don't switch group until all nodes with the same priority will be unhealthy. The lower the value, the higher the priority. |
 | `weight`   | `float` | `1` | Weight of node in the group with the same priority. Distribute requests to nodes proportionally to these values.                                        |

-
 ### `placement_policy` section

 ```yaml
@@ -331,7 +333,8 @@ File for `region_mapping` must contain something like this:
 ```

 **Note:** on SIGHUP reload policies will be updated only if both parameters are valid.
-So if you change `default` to some valid value and set invalid path in `region_mapping` the `default` value won't be changed.
+So if you change `default` to some valid value and set invalid path in `region_mapping` the `default` value won't be
+changed.

 #### `copies_numbers` subsection
@@ -348,7 +351,6 @@ So if you change `default` to some valid value and set invalid path in `region_m
 | `location_constraint` | `string` | no |  | Location constraint text label.                           |
 | `vector`              | `[]int`  | no |  | Array of copies numbers corresponding to the constraint.  |

-
 ### `server` section

 You can specify several listeners for server. For example, for `http` and `https`.
@@ -417,7 +419,7 @@ tags:
 ```

 | Parameter | Type     | SIGHUP reload | Default value             | Description                                                                                            |
-|-----------------------|------------|---------------|---------------------------|--------------------------------------------------------------------------------------------------------|
+|-----------|----------|---------------|---------------------------|--------------------------------------------------------------------------------------------------------|
 | `name`    | `string` | yes           |                           | Tag name. Possible values see below in `Tag values` section.                                          |
 | `level`   | `string` | yes           | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`.  |
@@ -429,10 +431,10 @@ tags:
 * `external_storage` - external interaction with storage node (enabled by default).
 * `external_storage_tree` - external interaction with tree service in storage node (enabled by default).

 ### `http_logging` section

 Could be enabled only in builds with `loghttp` build tag. To build with `loghttp` tag, pass `GOFLAGS` var to `make`:

 ```bash
 make GOFLAGS="-tags=loghttp" [target]
 ```
@@ -454,7 +456,6 @@ http_logging:
 | `gzip`        | `bool`   | yes | `false`  | Whether to enable Gzip compression to backup log files.                                                                                                            |
 | `destination` | `string` | yes | `stdout` | Specify path for log output. Accepts log file path, or "stdout" and "stderr" reserved words to print in output streams. File and folders are created if necessary. |

-
 ### `cache` section

 ```yaml
@@ -539,11 +540,24 @@ size: 100
 ```yaml
 cors:
   default_max_age: 600
+  copies_numbers: [ 0 ]
 ```

-| Parameter         | Type  | Default value | Description                                           |
-|-------------------|-------|---------------|-------------------------------------------------------|
-| `default_max_age` | `int` | `600`         | Value of `Access-Control-Max-Age` header in seconds.  |
+| Parameter         | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                                        |
+|-------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `default_max_age` | `int`      | no            | `600`         | Value of `Access-Control-Max-Age` header in seconds.                                                                                                                                                               |
+| `copies_numbers`  | `[]uint32` | yes           | `[0]`         | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy.  |
+
+### `lifecycle` section
+
+```yaml
+lifecycle:
+  copies_numbers: [ 0 ]
+```
+
+| Parameter        | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                                        |
+|------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `copies_numbers` | `[]uint32` | yes           | `[0]`         | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy.  |

 # `pprof` section
@@ -594,7 +608,7 @@ tracing:
 ```

 | Parameter    | Type                                   | SIGHUP reload | Default value | Description                                                                                                                       |
-| ------------ | -------------------------------------- | ------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------- |
+|--------------|----------------------------------------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------|
 | `enabled`    | `bool`                                 | yes           | `false`       | Flag to enable the service.                                                                                                       |
 | `exporter`   | `string`                               | yes           |               | Type of tracing exporter.                                                                                                         |
 | `endpoint`   | `string`                               | yes           |               | Address that service listener binds to.                                                                                           |
@@ -612,7 +626,7 @@ tracing:
 ```

 | Parameter | Type     | SIGHUP reload | Default value | Description      |
-|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
+|-----------|----------|---------------|---------------|------------------|
 | `key`     | `string` | yes           |               | Attribute key.   |
 | `value`   | `string` | yes           |               | Attribute value. |
@@ -620,12 +634,13 @@
 Contains parameters of requests to FrostFS.

-The `set_copies_number` value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`)
+The `set_copies_number` value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated
+numbers: `1,2,3`)
 header for `PutObject`, `CopyObject`, `CreateMultipartUpload`.

 ```yaml
 frostfs:
-  set_copies_number: [0]
+  set_copies_number: [ 0 ]
   client_cut: false
   buffer_max_size_for_put: 1048576 # 1mb
   tree_pool_max_attempts: 0
@@ -667,7 +682,8 @@ resolve_bucket:
 # `kludge` section

-Workarounds for non-standard use cases. In `profiles` subsection has the ability to override behavior for specific user agent.
+Workarounds for non-standard use cases. In `profiles` subsection has the ability to override behavior for specific user
+agent.

 ```yaml
 kludge:
@@ -708,9 +724,8 @@ kludge:
 | `use_default_xmlns`                       | `bool` | yes |  | Enable using default xml namespace for profile.                                                                                                                                                   |
 | `bypass_content_encoding_check_in_chunks` | `bool` | yes |  | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header.  |

-
 # `runtime` section

 Contains runtime parameters.

 ```yaml
@@ -723,6 +738,7 @@ runtime:
 | `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |

 # `features` section

 Contains parameters for enabling features.

 ```yaml
@@ -748,6 +764,7 @@ features:
 | `remove_on_replace.queue` | `int` | false | `10000` | Buffer size for objects to delete. If buffer is full creation new unversioned object won't remove old one. Lifecycler will do that. |

 # `web` section

 Contains web server configuration parameters.

 ```yaml
@@ -836,8 +853,12 @@ To override config values for default namespaces use namespace names that are pr
       "test": "{\"replicas\":[{\"count\":1,\"selector\":\"\"}],\"containerBackupFactor\":0,\"selectors\":[],\"filters\":[],\"unique\":false}"
     },
     "copies_numbers": {
-      "default": [ 0 ],
-      "test": [ 1 ]
+      "default": [
+        0
+      ],
+      "test": [
+        1
+      ]
     }
   }
 }

View file

@@ -71,8 +71,8 @@ const (
    FailedToReadRegionMapFilePolicies        = "failed to read region map file, policies will be empty"
    DefaultLocationConstraintCantBeOverriden = "'default' location constraint can't be overriden by custom policy, use 'placement_policy.default'"
    FailedToParseLocationConstraint          = "failed to parse location constraint, it cannot be used"
-   FailedToParseDefaultCopiesNumbers        = "failed to parse 'default' copies numbers, default one will be used"
-   FailedToParseCopiesNumbers               = "failed to parse copies numbers, skip"
+   FailedToParseCopiesNumbers                = "failed to parse copies numbers, default one will be used"
+   FailedToParsePlacementPolicyCopiesNumbers = "failed to parse placement policy copies numbers, skip"
    FailedToInitializeTracing                = "failed to initialize tracing"
    DefaultNamespacesCannotBeEmpty           = "default namespaces cannot be empty, defaults will be used"
    FailedToParseNamespacesConfig            = "failed to unmarshal namespaces config"