diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 3e9cd4e11..199ef553b 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -33,6 +33,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
 	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@@ -276,6 +277,9 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
 	a.setMetabaseConfig(&newConfig, oldConfig)
 
 	a.setGCConfig(&newConfig, oldConfig)
+	if err := a.setLimits(&newConfig, oldConfig); err != nil {
+		return err
+	}
 
 	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
 
@@ -369,6 +373,11 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s
 	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
 }
 
+func (a *applicationConfiguration) setLimits(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+	limitsConfig := oldConfig.Limits()
+	return qos.ValidateConfig(limitsConfig)
+}
+
 // internals contains application-specific internals that are created
 // on application startup and are shared b/w the components during
 // the application life cycle.
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index ef6380a62..a3caf27f5 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -11,6 +11,7 @@ import (
 	blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
 	fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
 	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@@ -76,6 +77,7 @@ func TestEngineSection(t *testing.T) {
 			ss := blob.Storages()
 			pl := sc.Pilorama()
 			gc := sc.GC()
+			limits := sc.Limits()
 
 			switch num {
 			case 0:
@@ -134,6 +136,71 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, false, sc.RefillMetabase())
 				require.Equal(t, mode.ReadOnly, sc.Mode())
 				require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
+
+				require.Equal(t, 10_000, limits.MaxReadRunningOps())
+				require.Equal(t, 1_000, limits.MaxReadWaitingOps())
+				require.Equal(t, 1_000, limits.MaxWriteRunningOps())
+				require.Equal(t, 100, limits.MaxWriteWaitingOps())
+				require.ElementsMatch(t, limits.ReadTags(),
+					[]limitsconfig.IOTagConfig{
+						{
+							Tag:         "internal",
+							Weight:      toPtr(20),
+							ReservedOps: toPtr(1000),
+							LimitOps:    toPtr(0),
+						},
+						{
+							Tag:         "client",
+							Weight:      toPtr(70),
+							ReservedOps: toPtr(10000),
+						},
+						{
+							Tag:         "background",
+							Weight:      toPtr(5),
+							LimitOps:    toPtr(10000),
+							ReservedOps: toPtr(0),
+						},
+						{
+							Tag:      "writecache",
+							Weight:   toPtr(5),
+							LimitOps: toPtr(25000),
+						},
+						{
+							Tag:      "policer",
+							Weight:   toPtr(5),
+							LimitOps: toPtr(25000),
+						},
+					})
+				require.ElementsMatch(t, limits.WriteTags(),
+					[]limitsconfig.IOTagConfig{
+						{
+							Tag:         "internal",
+							Weight:      toPtr(200),
+							ReservedOps: toPtr(100),
+							LimitOps:    toPtr(0),
+						},
+						{
+							Tag:         "client",
+							Weight:      toPtr(700),
+							ReservedOps: toPtr(1000),
+						},
+						{
+							Tag:         "background",
+							Weight:      toPtr(50),
+							LimitOps:    toPtr(1000),
+							ReservedOps: toPtr(0),
+						},
+						{
+							Tag:      "writecache",
+							Weight:   toPtr(50),
+							LimitOps: toPtr(2500),
+						},
+						{
+							Tag:      "policer",
+							Weight:   toPtr(50),
+							LimitOps: toPtr(2500),
+						},
+					})
 			case 1:
 				require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
 				require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -188,6 +255,13 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, true, sc.RefillMetabase())
 				require.Equal(t, mode.ReadWrite, sc.Mode())
 				require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
+
+				require.Equal(t, limitsconfig.NoLimit, limits.MaxReadRunningOps())
+				require.Equal(t, limitsconfig.NoLimit, limits.MaxReadWaitingOps())
+				require.Equal(t, limitsconfig.NoLimit, limits.MaxWriteRunningOps())
+				require.Equal(t, limitsconfig.NoLimit, limits.MaxWriteWaitingOps())
+				require.Equal(t, 0, len(limits.ReadTags()))
+				require.Equal(t, 0, len(limits.WriteTags()))
 			}
 			return nil
 		})
@@ -201,3 +275,7 @@ func TestEngineSection(t *testing.T) {
 		configtest.ForEnvFileType(t, path, fileConfigTest)
 	})
 }
+
+func toPtr(v float64) *float64 {
+	return &v
+}
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index 0620c9f63..8aa7d2401 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -4,6 +4,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@@ -125,6 +126,14 @@ func (x *Config) GC() *gcconfig.Config {
 	)
 }
 
+// Limits returns "limits" subsection as a limitsconfig.Config.
+func (x *Config) Limits() *limitsconfig.Config {
+	return limitsconfig.From(
+		(*config.Config)(x).
+			Sub("limits"),
+	)
+}
+
 // RefillMetabase returns the value of "resync_metabase" config parameter.
 //
 // Returns false if the value is not a valid bool.
diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go
new file mode 100644
index 000000000..e325a9386
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/shard/limits/config.go
@@ -0,0 +1,120 @@
+package limits
+
+import (
+	"math"
+	"strconv"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	"github.com/spf13/cast"
+)
+
+const (
+	NoLimit = math.MaxInt
+)
+
+// From wraps config section into Config.
+func From(c *config.Config) *Config {
+	return (*Config)(c)
+}
+
+// Config is a wrapper over the config section
+// which provides access to Shard's limits configurations.
+type Config config.Config
+
+// MaxReadRunningOps returns the value of "max_read_running_ops" config parameter.
+//
+// Returns NoLimit if the value is not a positive number.
+func (x *Config) MaxReadRunningOps() int {
+	if s := config.IntSafe((*config.Config)(x), "max_read_running_ops"); s > 0 {
+		return int(s)
+	}
+	return NoLimit
+}
+
+// MaxReadWaitingOps returns the value of "max_read_waiting_ops" config parameter.
+//
+// Returns NoLimit if the value is not a positive number.
+func (x *Config) MaxReadWaitingOps() int {
+	if s := config.IntSafe((*config.Config)(x), "max_read_waiting_ops"); s > 0 {
+		return int(s)
+	}
+	return NoLimit
+}
+
+// MaxWriteRunningOps returns the value of "max_write_running_ops" config parameter.
+//
+// Returns NoLimit if the value is not a positive number.
+func (x *Config) MaxWriteRunningOps() int {
+	if s := config.IntSafe((*config.Config)(x), "max_write_running_ops"); s > 0 {
+		return int(s)
+	}
+	return NoLimit
+}
+
+// MaxWriteWaitingOps returns the value of "max_write_waiting_ops" config parameter.
+//
+// Returns NoLimit if the value is not a positive number.
+func (x *Config) MaxWriteWaitingOps() int {
+	if s := config.IntSafe((*config.Config)(x), "max_write_waiting_ops"); s > 0 {
+		return int(s)
+	}
+	return NoLimit
+}
+
+type IOTagConfig struct {
+	Tag         string
+	Weight      *float64
+	LimitOps    *float64
+	ReservedOps *float64
+}
+
+func (x *Config) ReadTags() []IOTagConfig {
+	return x.tags("read")
+}
+
+func (x *Config) WriteTags() []IOTagConfig {
+	return x.tags("write")
+}
+
+func (x *Config) tags(sub string) []IOTagConfig {
+	c := (*config.Config)(x).Sub(sub)
+	var result []IOTagConfig
+	for i := 0; ; i++ {
+		tag := config.String(c, strconv.Itoa(i)+".tag")
+		if tag == "" {
+			return result
+		}
+
+		var tagConfig IOTagConfig
+		tagConfig.Tag = tag
+
+		v := c.Value(strconv.Itoa(i) + ".weight")
+		if v != nil {
+			w, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.Weight = &w
+		}
+
+		v = c.Value(strconv.Itoa(i) + ".limit_ops")
+		if v != nil {
+			l, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.LimitOps = &l
+		}
+
+		v = c.Value(strconv.Itoa(i) + ".reserved_ops")
+		if v != nil {
+			r, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.ReservedOps = &r
+		}
+
+		result = append(result, tagConfig)
+	}
+}
+
+func panicOnErr(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/config/example/node.env b/config/example/node.env
index b2a0633a9..37b459ec0 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -154,6 +154,45 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
 #### Limit of concurrent workers collecting expired objects by the garbage collector
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
+#### Limits config
+FROSTFS_STORAGE_SHARD_0_LIMITS_MAX_READ_RUNNING_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_MAX_READ_WAITING_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_MAX_WRITE_RUNNING_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_MAX_WRITE_WAITING_OPS=100
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_0_TAG=internal
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_0_WEIGHT=20
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_0_LIMIT_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_0_RESERVED_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_1_TAG=client
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_1_WEIGHT=70
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_1_RESERVED_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_2_TAG=background
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_2_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_2_LIMIT_OPS=10000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_2_RESERVED_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_3_TAG=writecache
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_3_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_3_LIMIT_OPS=25000
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_4_TAG=policer
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_4_WEIGHT=5
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_4_LIMIT_OPS=25000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_0_TAG=internal
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_0_WEIGHT=200
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_0_LIMIT_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_0_RESERVED_OPS=100
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_1_TAG=client
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_1_WEIGHT=700
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_1_RESERVED_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_2_TAG=background
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_2_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_2_LIMIT_OPS=1000
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_2_RESERVED_OPS=0
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_3_TAG=writecache
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_3_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_3_LIMIT_OPS=2500
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_4_TAG=policer
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_4_WEIGHT=50
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_4_LIMIT_OPS=2500
 
 ## 1 shard
 ### Flag to refill Metabase from BlobStor
diff --git a/config/example/node.json b/config/example/node.json
index f3192ac2f..38175eef6 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -139,7 +139,10 @@
       "skip_session_token_issuer_verification": true
     },
     "get": {
-      "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
+      "priority": [
+        "$attribute:ClusterName",
+        "$attribute:UN-LOCODE"
+      ]
     }
   },
   "storage": {
@@ -170,7 +173,8 @@
         },
         "compress": true,
         "compression_exclude_content_types": [
-          "audio/*", "video/*"
+          "audio/*",
+          "video/*"
         ],
         "compression_estimate_compressibility": true,
         "compression_estimate_compressibility_threshold": 0.7,
@@ -206,6 +210,70 @@
           "remover_sleep_interval": "2m",
           "expired_collector_batch_size": 1500,
           "expired_collector_worker_count": 15
+        },
+        "limits": {
+          "max_read_running_ops": 10000,
+          "max_read_waiting_ops": 1000,
+          "max_write_running_ops": 1000,
+          "max_write_waiting_ops": 100,
+          "read": [
+            {
+              "tag": "internal",
+              "weight": 20,
+              "limit_ops": 0,
+              "reserved_ops": 1000
+            },
+            {
+              "tag": "client",
+              "weight": 70,
+              "reserved_ops": 10000
+            },
+            {
+              "tag": "background",
+              "weight": 5,
+              "limit_ops": 10000,
+              "reserved_ops": 0
+            },
+            {
+              "tag": "writecache",
+              "weight": 5,
+              "limit_ops": 25000
+            },
+            {
+              "tag": "policer",
+              "weight": 5,
+              "limit_ops": 25000
+            }
+          ],
+          "write": [
+            {
+              "tag": "internal",
+              "weight": 200,
+              "limit_ops": 0,
+              "reserved_ops": 100
+            },
+            {
+              "tag": "client",
+              "weight": 700,
+              "reserved_ops": 1000
+            },
+            {
+              "tag": "background",
+              "weight": 50,
+              "limit_ops": 1000,
+              "reserved_ops": 0
+            },
+            {
+              "tag": "writecache",
+              "weight": 50,
+              "limit_ops": 2500
+            },
+            {
+              "tag": "policer",
+              "weight": 50,
+              "limit_ops": 2500
+            }
+          ]
         }
       },
       "1": {
@@ -267,7 +335,7 @@
     "endpoint": "localhost",
     "exporter": "otlp_grpc",
     "trusted_ca": "",
-    "attributes":[
+    "attributes": [
       {
         "key": "key0",
         "value": "value"
@@ -296,7 +364,7 @@
     },
     {
       "mask": "10.78.70.74/24",
-      "source_ips":[
+      "source_ips": [
         "10.78.70.185",
        "10.78.71.185"
       ]
@@ -306,4 +374,4 @@
     "restrict": false,
     "fallback_delay": "350ms"
   }
-}
+}
\ No newline at end of file
diff --git a/config/example/node.yaml b/config/example/node.yaml
index c5acf5386..5540966d9 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -218,6 +218,48 @@ storage:
       remover_sleep_interval: 2m # frequency of the garbage collector invocation
       expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
       expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
+
+    limits:
+      max_read_running_ops: 10000
+      max_read_waiting_ops: 1000
+      max_write_running_ops: 1000
+      max_write_waiting_ops: 100
+      read:
+        - tag: internal
+          weight: 20
+          limit_ops: 0
+          reserved_ops: 1000
+        - tag: client
+          weight: 70
+          reserved_ops: 10000
+        - tag: background
+          weight: 5
+          limit_ops: 10000
+          reserved_ops: 0
+        - tag: writecache
+          weight: 5
+          limit_ops: 25000
+        - tag: policer
+          weight: 5
+          limit_ops: 25000
+      write:
+        - tag: internal
+          weight: 200
+          limit_ops: 0
+          reserved_ops: 100
+        - tag: client
+          weight: 700
+          reserved_ops: 1000
+        - tag: background
+          weight: 50
+          limit_ops: 1000
+          reserved_ops: 0
+        - tag: writecache
+          weight: 50
+          limit_ops: 2500
+        - tag: policer
+          weight: 50
+          limit_ops: 2500
 
   1:
     writecache:
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 98d72cb69..35b681475 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -194,6 +194,7 @@ The following table describes configuration for each shard.
 | `blobstor`          | [Blobstor config](#blobstor-subsection)   |      | Blobstor configuration.                               |
 | `small_object_size` | `size`                                    | `1M` | Maximum size of an object stored in blobovnicza tree. |
 | `gc`                | [GC config](#gc-subsection)               |      | GC configuration.                                     |
+| `limits`            | [Shard limits config](#limits-subsection) |      | Shard limits configuration.                           |
 
 ### `blobstor` subsection
 
@@ -300,6 +301,64 @@ writecache:
 | `flush_worker_count`        | `int`  | `20`   | Amount of background workers that move data from the writecache to the blobstor. |
 | `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects.                                    |
 
+### `limits` subsection
+
+```yaml
+limits:
+  max_read_running_ops: 10000
+  max_read_waiting_ops: 1000
+  max_write_running_ops: 1000
+  max_write_waiting_ops: 100
+  read:
+    - tag: internal
+      weight: 20
+      limit_ops: 0
+      reserved_ops: 1000
+    - tag: client
+      weight: 70
+      reserved_ops: 10000
+    - tag: background
+      weight: 5
+      limit_ops: 10000
+      reserved_ops: 0
+    - tag: writecache
+      weight: 5
+      limit_ops: 25000
+    - tag: policer
+      weight: 5
+      limit_ops: 25000
+  write:
+    - tag: internal
+      weight: 200
+      limit_ops: 0
+      reserved_ops: 100
+    - tag: client
+      weight: 700
+      reserved_ops: 1000
+    - tag: background
+      weight: 50
+      limit_ops: 1000
+      reserved_ops: 0
+    - tag: writecache
+      weight: 50
+      limit_ops: 2500
+    - tag: policer
+      weight: 50
+      limit_ops: 2500
+```
+
+| Parameter               | Type     | Default value  | Description                                                                             |
+| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------- |
+| `max_read_running_ops`  | `int`    | 0 (no limit)   | The maximum number of running read operations.                                          |
+| `max_read_waiting_ops`  | `int`    | 0 (no limit)   | The maximum number of waiting read operations.                                          |
+| `max_write_running_ops` | `int`    | 0 (no limit)   | The maximum number of running write operations.                                         |
+| `max_write_waiting_ops` | `int`    | 0 (no limit)   | The maximum number of waiting write operations.                                         |
+| `read`                  | `[]tag`  | empty          | Array of shard read settings for tags.                                                  |
+| `write`                 | `[]tag`  | empty          | Array of shard write settings for tags.                                                 |
+| `tag.tag`               | `string` | empty          | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`.  |
+| `tag.weight`            | `float`  | 0 (no weight)  | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. |
+| `tag.limit_ops`         | `float`  | 0 (no limit)   | Operations per second rate limit for queries with the specified tag.                    |
+| `tag.reserved_ops`      | `float`  | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag.                 |
 
 # `node` section
 
diff --git a/internal/qos/tags.go b/internal/qos/tags.go
new file mode 100644
index 000000000..6a9a7f7a4
--- /dev/null
+++ b/internal/qos/tags.go
@@ -0,0 +1,39 @@
+package qos
+
+import "fmt"
+
+type IOTag string
+
+const (
+	IOTagClient     IOTag = "client"
+	IOTagInternal   IOTag = "internal"
+	IOTagBackground IOTag = "background"
+	IOTagWritecache IOTag = "writecache"
+	IOTagPolicer    IOTag = "policer"
+	IOTagCritical   IOTag = "critical"
+
+	ioTagUnknown IOTag = ""
+)
+
+func FromRawString(s string) (IOTag, error) {
+	switch s {
+	case string(IOTagCritical):
+		return IOTagCritical, nil
+	case string(IOTagClient):
+		return IOTagClient, nil
+	case string(IOTagInternal):
+		return IOTagInternal, nil
+	case string(IOTagBackground):
+		return IOTagBackground, nil
+	case string(IOTagWritecache):
+		return IOTagWritecache, nil
+	case string(IOTagPolicer):
+		return IOTagPolicer, nil
+	default:
+		return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
+	}
+}
+
+func (t IOTag) String() string {
+	return string(t)
+}
diff --git a/internal/qos/validate.go b/internal/qos/validate.go
new file mode 100644
index 000000000..60e86658f
--- /dev/null
+++ b/internal/qos/validate.go
@@ -0,0 +1,88 @@
+package qos
+
+import (
+	"errors"
+	"fmt"
+	"math"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
+)
+
+var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
+
+type tagConfig struct {
+	Shares, Limit, Reserved *float64
+}
+
+func ValidateConfig(c *limits.Config) error {
+	if c.MaxReadRunningOps() <= 0 {
+		return fmt.Errorf("invalid 'max_read_running_ops = %d': must be greater than zero", c.MaxReadRunningOps())
+	}
+	if c.MaxReadWaitingOps() <= 0 {
+		return fmt.Errorf("invalid 'max_read_waiting_ops = %d': must be greater than zero", c.MaxReadWaitingOps())
+	}
+	if c.MaxWriteRunningOps() <= 0 {
+		return fmt.Errorf("invalid 'max_write_running_ops = %d': must be greater than zero", c.MaxWriteRunningOps())
+	}
+	if c.MaxWriteWaitingOps() <= 0 {
+		return fmt.Errorf("invalid 'max_write_waiting_ops = %d': must be greater than zero", c.MaxWriteWaitingOps())
+	}
+	if err := validateTags(c.ReadTags()); err != nil {
+		return fmt.Errorf("'read' config validation error: %w", err)
+	}
+	if err := validateTags(c.WriteTags()); err != nil {
+		return fmt.Errorf("'write' config validation error: %w", err)
+	}
+	return nil
+}
+
+func validateTags(configTags []limits.IOTagConfig) error {
+	tags := map[IOTag]tagConfig{
+		IOTagClient:     {},
+		IOTagInternal:   {},
+		IOTagBackground: {},
+		IOTagWritecache: {},
+		IOTagPolicer:    {},
+	}
+	for _, t := range configTags {
+		tag, err := FromRawString(t.Tag)
+		if err != nil {
+			return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
+		}
+		if _, ok := tags[tag]; !ok {
+			return fmt.Errorf("tag %s is not configurable", t.Tag)
+		}
+		tags[tag] = tagConfig{
+			Shares:   t.Weight,
+			Limit:    t.LimitOps,
+			Reserved: t.ReservedOps,
+		}
+	}
+	idx := 0
+	var shares float64
+	for t, v := range tags {
+		if idx == 0 {
+			idx++
+			shares = float64Value(v.Shares)
+		} else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
+			return errWeightsMustBeSpecified
+		}
+		if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
+			return fmt.Errorf("invalid weight for tag %s: must be a non-negative value", t.String())
+		}
+		if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
+			return fmt.Errorf("invalid limit_ops for tag %s: must be a non-negative value", t.String())
+		}
+		if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
+			return fmt.Errorf("invalid reserved_ops for tag %s: must be a non-negative value", t.String())
+		}
+	}
+	return nil
+}
+
+func float64Value(f *float64) float64 {
+	if f == nil {
+		return 0.0
+	}
+	return *f
+}
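
A note on the weight semantics enforced above: `validateTags` seeds its map with all five configurable tags, so a tag omitted from the config carries a nil (zero) weight, and mixing weighted and unweighted tags trips `errWeightsMustBeSpecified`. The following standalone sketch mirrors that all-or-none rule outside the node's config plumbing; `checkWeights` is an illustrative helper, not part of the patch.

```go
package main

import (
	"errors"
	"fmt"
)

// checkWeights mirrors the all-or-none rule from validateTags: either every
// tag carries a non-zero weight, or none does. A nil pointer counts as
// "unspecified" (zero), matching float64Value in validate.go.
func checkWeights(weights []*float64) error {
	value := func(f *float64) float64 {
		if f == nil {
			return 0
		}
		return *f
	}
	for i := range weights {
		if value(weights[i]) < 0 {
			return errors.New("weight must be a non-negative value")
		}
		if i > 0 && (value(weights[0]) == 0) != (value(weights[i]) == 0) {
			return errors.New("weights must be specified for all tags or for none")
		}
	}
	return nil
}

func main() {
	w := 20.0
	fmt.Println(checkWeights([]*float64{&w, nil}))  // error: mixed specified and unspecified
	fmt.Println(checkWeights([]*float64{nil, nil})) // <nil>: none specified is fine
}
```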
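
One behavior of `(*Config).tags` worth flagging for reviewers: it probes `0.tag`, `1.tag`, ... and returns at the first index whose `tag` key is empty, so a gap in the `_READ_N_*` env numbering silently drops every entry after it. A minimal sketch of that termination rule over a plain map (the key names here are illustrative, not the node's config tree):

```go
package main

import (
	"fmt"
	"strconv"
)

// collectTags mirrors the loop in (*Config).tags: probe "0.tag", "1.tag", ...
// and stop at the first missing index, preserving the configured order.
func collectTags(kv map[string]string) []string {
	var result []string
	for i := 0; ; i++ {
		tag, ok := kv[strconv.Itoa(i)+".tag"]
		if !ok || tag == "" {
			return result
		}
		result = append(result, tag)
	}
}

func main() {
	kv := map[string]string{
		"0.tag": "internal",
		"1.tag": "client",
		"3.tag": "policer", // never reached: index 2 is missing, so the scan stops
	}
	fmt.Println(collectTags(kv)) // [internal client]
}
```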