Compare commits

..

7 commits

Author SHA1 Message Date
d63ff20078
[#1445] shard/gc: Remove graves of new format separately
All checks were successful
DCO action / DCO (pull_request) Successful in 34s
Vulncheck / Vulncheck (pull_request) Successful in 1m4s
Build / Build Components (pull_request) Successful in 1m38s
Pre-commit hooks / Pre-commit (pull_request) Successful in 1m41s
Tests and linters / Run gofumpt (pull_request) Successful in 2m27s
Tests and linters / Tests with -race (pull_request) Successful in 3m12s
Tests and linters / Lint (pull_request) Successful in 3m17s
Tests and linters / Tests (pull_request) Successful in 3m35s
Tests and linters / Staticcheck (pull_request) Successful in 3m34s
Tests and linters / gopls check (pull_request) Successful in 4m22s
Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:07 +03:00
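
Below is a minimal, purely illustrative Go sketch of the idea behind the commit above: graves that carry an expiration epoch (the new format) are split out and removed in a separate pass once their epoch has passed, while old-format graves keep following the legacy path. All names and the grave layout here are hypothetical, not the actual frostfs-node code.

package main

import "fmt"

// grave is a hypothetical representation of a graveyard record.
// New-format graves carry an expiration epoch; old-format graves do not.
type grave struct {
	address         string
	expirationEpoch uint64
	hasEpoch        bool
}

// splitGraves separates new-format graves that have already expired
// from everything else, so the GC can remove them in a dedicated pass.
func splitGraves(graves []grave, currentEpoch uint64) (expiredNew, rest []grave) {
	for _, g := range graves {
		if g.hasEpoch && g.expirationEpoch < currentEpoch {
			expiredNew = append(expiredNew, g)
			continue
		}
		rest = append(rest, g)
	}
	return
}

func main() {
	graves := []grave{
		{address: "cnr1/obj1", expirationEpoch: 10, hasEpoch: true},
		{address: "cnr1/obj2"}, // old format, no epoch
	}
	expired, rest := splitGraves(graves, 12)
	fmt.Println(len(expired), len(rest)) // 1 1
}
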
cee94aae33
[#1445] engine/test: Add test for GC handling of expired tombstones
Since the GC behavior is changing drastically, this test is needed
to ensure that the GC correctly deletes expired tombstones and graves.
The test uses graves of both old and new formats.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:07 +03:00
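
A rough sketch of the kind of check such a test exercises, assuming a hypothetical isExpiredGrave helper; the real engine test runs against the actual storage engine API rather than this standalone function.

package gc

import "testing"

// isExpiredGrave is a hypothetical helper: an old-format grave (no epoch)
// is never considered expired by itself, while a new-format grave expires
// once the current epoch is past its expiration epoch.
func isExpiredGrave(expirationEpoch uint64, hasEpoch bool, current uint64) bool {
	return hasEpoch && expirationEpoch < current
}

func TestExpiredGraves(t *testing.T) {
	cases := []struct {
		name     string
		epoch    uint64
		hasEpoch bool
		current  uint64
		want     bool
	}{
		{"old format grave", 0, false, 100, false},
		{"new format, not expired", 100, true, 100, false},
		{"new format, expired", 100, true, 101, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := isExpiredGrave(tc.epoch, tc.hasEpoch, tc.current); got != tc.want {
				t.Errorf("got %v, want %v", got, tc.want)
			}
		})
	}
}
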
e119f35827
[#1445] shard/gc: Collect expired objects of all types in one worker
Before, expired objects of each type were handled separately. Since
handling expired objects of each type is similar, let's use a common
function for that and collect and remove them in one worker.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:07 +03:00
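
The single-worker approach can be sketched as follows; the channel, types, and removal callback are hypothetical stand-ins for the shard's internal machinery, not the actual implementation.

package main

import (
	"fmt"
	"sync"
)

// objectType is a hypothetical enum of the object kinds handled by the GC.
type objectType int

const (
	typeRegular objectType = iota
	typeTombstone
	typeLock
)

type expiredObject struct {
	typ   objectType
	addr  string
	epoch uint64
}

// runExpirationWorker starts a single worker that receives expired objects
// of every type over one channel and removes them with a common routine,
// instead of running a separate goroutine per object type.
func runExpirationWorker(in <-chan expiredObject, remove func(expiredObject)) *sync.WaitGroup {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for obj := range in {
			remove(obj) // same handling code path for all types
		}
	}()
	return &wg
}

func main() {
	in := make(chan expiredObject)
	wg := runExpirationWorker(in, func(o expiredObject) {
		fmt.Printf("removing expired object %s (type %d)\n", o.addr, o.typ)
	})

	// Objects of all types are fed to the same worker.
	for _, o := range []expiredObject{
		{typeRegular, "cnr/obj1", 5},
		{typeTombstone, "cnr/ts1", 7},
		{typeLock, "cnr/lock1", 9},
	} {
		in <- o
	}
	close(in)
	wg.Wait()
}
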
ab1362c297
[#1445] shard/gc: Remove object type from duration metric
Before, expired objects of each type were handled in separate
goroutines. Now expired objects of all types are handled in one
goroutine. There is no longer a way to attach the object type when
writing the expired-object handling duration metric, so remove the
object type from this metric.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:06 +03:00
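
For illustration only, using the Prometheus Go client: dropping the per-type partitioning means switching from a labelled histogram vector to a plain histogram. The metric names below are assumptions, not the actual frostfs-node metrics.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Before: a histogram partitioned by object type, e.g.
//   prometheus.NewHistogramVec(opts, []string{"object_type"})
// After: a single histogram without the "object_type" label, since all
// expired objects are now handled by one routine.
var expiredCollectionDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "frostfs",  // hypothetical namespace
	Subsystem: "shard_gc", // hypothetical subsystem
	Name:      "expired_collection_duration_seconds",
	Help:      "Duration of a single expired-object collection pass.",
})

func collectExpiredObjects() {
	start := time.Now()
	// ... collect and remove expired objects of all types ...
	expiredCollectionDuration.Observe(time.Since(start).Seconds())
}

func main() {
	prometheus.MustRegister(expiredCollectionDuration)
	collectExpiredObjects()
}
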
24709df702
[#1445] shard/gc: Make handler and callback names more descriptive
Consider the following methods:
- `(*Shard).collectExpiredTombstones`
- `(*StorageEngines).processExpiredTombstones`
- `(*Shard).HandleExpiredTombstones`

All of them handle graves, not tombstones. `HandleExpiredTombstones`
does in fact delete tombstones, but it does so based on the graves it
received. So, rename all `...Tombstones` methods to `...Graves` methods.
It'll make future changes in the garbage collector behavior simpler.

Also, rename all `...Locks` methods to `...LockObjects` because they
handle lock objects, not locks.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:05 +03:00
3cbd8be700
[#1445] metabase/test: Add test for graves of new format
Test iterating over a graveyard populated with graves of both old and
new formats, i.e. without and with an expiration epoch, respectively.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:05 +03:00
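
A toy version of such a test, assuming an illustrative value layout (a 32-byte tombstone id, optionally followed by an 8-byte expiration epoch); the real metabase encoding and iteration API differ.

package meta

import (
	"encoding/binary"
	"testing"
)

// decodeGrave is a purely illustrative decoder: an old-format grave value
// holds only a 32-byte tombstone id, while a new-format value has an
// 8-byte expiration epoch appended.
func decodeGrave(value []byte) (epoch uint64, hasEpoch bool) {
	if len(value) == 32+8 {
		return binary.LittleEndian.Uint64(value[32:]), true
	}
	return 0, false
}

func TestIterateGraveyardBothFormats(t *testing.T) {
	oldGrave := make([]byte, 32)
	newGrave := make([]byte, 40)
	binary.LittleEndian.PutUint64(newGrave[32:], 42)

	for _, v := range [][]byte{oldGrave, newGrave} {
		epoch, ok := decodeGrave(v)
		if ok && epoch != 42 {
			t.Fatalf("unexpected epoch %d", epoch)
		}
		if !ok && len(v) != 32 {
			t.Fatal("old-format grave must have no epoch")
		}
	}
}
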
efec26b8ef
[#1445] local_object_storage: Append expiration epoch to graves
In the near future the garbage collector will delete expired tombstones
and graves separately. So, all graves should have expiration epochs.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2025-02-18 13:41:04 +03:00
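
The idea can be sketched as a value-level transformation; the concrete grave layout used by the metabase is not reproduced here and the helper below is hypothetical.

package main

import (
	"encoding/binary"
	"fmt"
)

// appendExpirationEpoch takes an existing grave value (whatever the
// metabase already stores for the grave) and appends an 8-byte expiration
// epoch, producing a "new format" grave. Illustrative only.
func appendExpirationEpoch(graveValue []byte, epoch uint64) []byte {
	out := make([]byte, len(graveValue)+8)
	copy(out, graveValue)
	binary.LittleEndian.PutUint64(out[len(graveValue):], epoch)
	return out
}

func main() {
	oldValue := []byte("tombstone-address") // placeholder payload
	newValue := appendExpirationEpoch(oldValue, 1024)
	fmt.Println(len(oldValue), len(newValue)) // old vs new format sizes
}
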
235 changed files with 4524 additions and 4645 deletions

.ci/Jenkinsfile (vendored): 83 changed lines

@ -1,83 +0,0 @@
def golang = ['1.23', '1.24']
def golangDefault = "golang:${golang.last()}"
async {
for (version in golang) {
def go = version
task("test/go${go}") {
container("golang:${go}") {
sh 'make test'
}
}
task("build/go${go}") {
container("golang:${go}") {
for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
sh """
make bin/frostfs-${app}
bin/frostfs-${app} --version
"""
}
}
}
}
task('test/race') {
container(golangDefault) {
sh 'make test GOFLAGS="-count=1 -race"'
}
}
task('lint') {
container(golangDefault) {
sh 'make lint-install lint'
}
}
task('staticcheck') {
container(golangDefault) {
sh 'make staticcheck-install staticcheck-run'
}
}
task('gopls') {
container(golangDefault) {
sh 'make gopls-install gopls-run'
}
}
task('gofumpt') {
container(golangDefault) {
sh '''
make fumpt-install
make fumpt
git diff --exit-code --quiet
'''
}
}
task('vulncheck') {
container(golangDefault) {
sh '''
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
'''
}
}
task('pre-commit') {
dockerfile("""
FROM ${golangDefault}
RUN apt update && \
apt install -y --no-install-recommends pre-commit
""") {
withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
sh 'pre-commit run --color=always --hook-stage=manual --all-files'
}
}
}
}
// TODO: dco check


@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.23', '1.24' ]
go_versions: [ '1.22', '1.23' ]
steps:
- uses: actions/checkout@v3


@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.22'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3


@ -21,7 +21,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.24
go-version: 1.23
- name: Set up Python
run: |
apt update


@ -16,7 +16,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install linters
@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.23', '1.24' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@ -53,7 +53,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.22'
cache: true
- name: Run tests
@ -68,7 +68,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install staticcheck
@ -104,7 +104,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install gofumpt


@ -18,7 +18,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
check-latest: true
- name: Install govulncheck


@ -22,11 +22,6 @@ linters-settings:
# 'default' case is present, even if all enum members aren't listed in the
# switch
default-signifies-exhaustive: true
gci:
sections:
- standard
- default
custom-order: true
govet:
# report about shadowed variables
check-shadowing: false
@ -77,7 +72,6 @@ linters:
- durationcheck
- exhaustive
- copyloopvar
- gci
- gofmt
- goimports
- misspell


@ -1,6 +1,5 @@
#!/usr/bin/make -f
SHELL = bash
.SHELLFLAGS = -euo pipefail -c
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@ -8,7 +7,7 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.23
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.2
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
@ -17,7 +16,7 @@ PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
PROTOC_OS_VERSION=linux-x86_64
endif
STATICCHECK_VERSION ?= 2025.1.1
STATICCHECK_VERSION ?= 2024.1.1
ARCH = amd64
BIN = bin
@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
GOPLS_VERSION ?= v0.17.1
GOPLS_VERSION ?= v0.15.1
GOPLS_DIR ?= $(abspath $(BIN))/gopls
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
GOPLS_TEMP_FILE := $(shell mktemp)
@ -116,7 +115,7 @@ protoc:
# Install protoc
protoc-install:
@rm -rf $(PROTOBUF_DIR)
@mkdir -p $(PROTOBUF_DIR)
@mkdir $(PROTOBUF_DIR)
@echo "⇒ Installing protoc... "
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@ -170,7 +169,7 @@ imports:
# Install gofumpt
fumpt-install:
@rm -rf $(GOFUMPT_DIR)
@mkdir -p $(GOFUMPT_DIR)
@mkdir $(GOFUMPT_DIR)
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
# Run gofumpt
@ -187,37 +186,14 @@ test:
@echo "⇒ Running go test"
@GOFLAGS="$(GOFLAGS)" go test ./...
# Install Gerrit commit-msg hook
review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
review-install:
@git config remote.review.url \
|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
@mkdir -p $(GIT_HOOK_DIR)/
@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
@chmod +x $(GIT_HOOK_DIR)/commit-msg
@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
# Create a PR in Gerrit
review: BRANCH ?= master
review:
@git push review HEAD:refs/for/$(BRANCH) \
--push-option r=e.stratonikov@yadro.com \
--push-option r=d.stepanov@yadro.com \
--push-option r=an.nikiforov@yadro.com \
--push-option r=a.arifullin@yadro.com \
--push-option r=ekaterina.lebedeva@yadro.com \
--push-option r=a.savchuk@yadro.com \
--push-option r=a.chuprov@yadro.com
# Run pre-commit
pre-commit-run:
@pre-commit run -a --hook-stage manual
# Install linters
lint-install: $(BIN)
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@ -236,7 +212,7 @@ lint:
# Install staticcheck
staticcheck-install:
@rm -rf $(STATICCHECK_DIR)
@mkdir -p $(STATICCHECK_DIR)
@mkdir $(STATICCHECK_DIR)
@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
# Run staticcheck
@ -249,7 +225,7 @@ staticcheck-run:
# Install gopls
gopls-install:
@rm -rf $(GOPLS_DIR)
@mkdir -p $(GOPLS_DIR)
@mkdir $(GOPLS_DIR)
@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
# Run gopls


@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
nbuf := make([]byte, 8)
copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
}


@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE()))
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()


@ -34,7 +34,7 @@ const (
subjectNameFlag = "subject-name"
subjectKeyFlag = "subject-key"
subjectAddressFlag = "subject-address"
extendedFlag = "extended"
includeNamesFlag = "include-names"
groupNameFlag = "group-name"
groupIDFlag = "group-id"
@ -209,7 +209,7 @@ func initFrostfsIDListSubjectsCmd() {
Cmd.AddCommand(frostfsidListSubjectsCmd)
frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
}
func initFrostfsIDCreateGroupCmd() {
@ -256,7 +256,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
}
func initFrostfsIDSetKVCmd() {
@ -336,7 +336,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
}
func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
extended, _ := cmd.Flags().GetBool(extendedFlag)
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
ns := getFrostfsIDNamespace(cmd)
inv, _, hash := initInvoker(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
@ -349,19 +349,21 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
for _, addr := range subAddresses {
if !extended {
if !includeNames {
cmd.Println(address.Uint160ToString(addr))
continue
}
items, err := reader.GetSubject(addr)
sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
printSubjectInfo(cmd, addr, subj)
cmd.Println()
cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
}
}
@ -481,7 +483,7 @@ func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd)
extended, _ := cmd.Flags().GetBool(extendedFlag)
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
inv, cs, hash := initInvoker(cmd)
_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@ -499,7 +501,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
for _, subjAddr := range subjects {
if !extended {
if !includeNames {
cmd.Println(address.Uint160ToString(subjAddr))
continue
}
@ -508,8 +510,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
printSubjectInfo(cmd, subjAddr, subj)
cmd.Println()
cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
}
}
@ -599,30 +600,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui
return inv, cs, nmHash
}
func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
pk := "<nil>"
if subj.PrimaryKey != nil {
pk = subj.PrimaryKey.String()
}
cmd.Printf("Primary key: %s\n", pk)
cmd.Printf("Name: %s\n", subj.Name)
cmd.Printf("Namespace: %s\n", subj.Namespace)
if len(subj.AdditionalKeys) > 0 {
cmd.Printf("Additional keys:\n")
for _, key := range subj.AdditionalKeys {
k := "<nil>"
if key != nil {
k = key.String()
}
cmd.Printf("- %s\n", k)
}
}
if len(subj.KV) > 0 {
cmd.Printf("KV:\n")
for k, v := range subj.KV {
cmd.Printf("- %s: %s\n", k, v)
}
}
}


@ -3,7 +3,6 @@ package helper
import (
"errors"
"fmt"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
return err
}
for k, v := range m {
if slices.Contains(NetmapConfigKeys, k) {
md[k] = v
for _, key := range NetmapConfigKeys {
if k == key {
md[k] = v
break
}
}
}
return nil


@ -22,14 +22,15 @@ import (
)
const (
gasInitialTotalSupply = 30000000 * native.GASFactor
// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
)
func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
}
func transferFunds(c *helper.InitializeContext) error {
@ -41,11 +42,6 @@ func transferFunds(c *helper.InitializeContext) error {
return err
}
version, err := c.Client.GetVersion()
if err != nil {
return err
}
var transfers []transferTarget
for _, acc := range c.Accounts {
to := acc.Contract.ScriptHash()
@ -63,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{
Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(),
Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
Amount: initialCommitteeGASAmount(c),
},
transferTarget{
Token: neo.Hash,
@ -87,23 +83,16 @@ func transferFunds(c *helper.InitializeContext) error {
// transferFundsFinished checks balances of accounts we transfer GAS to.
// The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
if err != nil {
return false, err
}
acc := c.Accounts[0]
version, err := c.Client.GetVersion()
if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(acc.Contract.ScriptHash())
if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
return false, err
}
res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
if err != nil {
return false, err
}
return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {


@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
_ = tw.Flush()
cmd.Print(buf.String())


@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@ -40,6 +41,7 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))


@ -0,0 +1,137 @@
package storagecfg
const configTemplate = `logger:
level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
node:
wallet:
path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
addresses: # list of addresses announced by Storage node in the Network map
- {{ .AnnouncedAddress }}
attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
grpc:
num: 1 # total number of listener endpoints
0:
endpoint: {{ .Endpoint }} # endpoint for gRPC server
tls:{{if .TLSCert}}
enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
certificate: {{ .TLSCert }} # path to TLS certificate
key: {{ .TLSKey }} # path to TLS key
{{- else }}
enabled: false # disable TLS for a gRPC connection
{{- end}}
control:
authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
{{- range .AuthorizedKeys }}
- {{.}}{{end}}
grpc:
endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
morph:
dial_timeout: 20s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # use TTL cache for side chain GET operations
rpc_endpoint: # side chain N3 RPC endpoints
{{- range .MorphRPC }}
- address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard:
default: # section with the default shard parameters
metabase:
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
blobstor:
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 2 # max depth of object tree storage in FS
small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
compress: true # turn on/off Zstandard compression (level 3) of stored objects
compression_exclude_content_types:
- audio/*
- video/*
blobovnicza:
size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
gc:
remover_batch_size: 200 # number of objects to be removed by the garbage collector
remover_sleep_interval: 5m # frequency of the garbage collector invocation
0:
mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
metabase:
path: {{ .MetabasePath }} # path to the metabase
blobstor:
path: {{ .BlobstorPath }} # path to the blobstor
{{end}}`
const (
neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
)
var n3config = map[string]struct {
MorphRPC []string
RPC []string
NeoFSContract string
BalanceContract string
}{
"testnet": {
MorphRPC: []string{
"rpc01.morph.testnet.fs.neo.org:51331",
"rpc02.morph.testnet.fs.neo.org:51331",
"rpc03.morph.testnet.fs.neo.org:51331",
"rpc04.morph.testnet.fs.neo.org:51331",
"rpc05.morph.testnet.fs.neo.org:51331",
"rpc06.morph.testnet.fs.neo.org:51331",
"rpc07.morph.testnet.fs.neo.org:51331",
},
RPC: []string{
"rpc01.testnet.n3.nspcc.ru:21331",
"rpc02.testnet.n3.nspcc.ru:21331",
"rpc03.testnet.n3.nspcc.ru:21331",
"rpc04.testnet.n3.nspcc.ru:21331",
"rpc05.testnet.n3.nspcc.ru:21331",
"rpc06.testnet.n3.nspcc.ru:21331",
"rpc07.testnet.n3.nspcc.ru:21331",
},
NeoFSContract: neofsTestnetAddress,
BalanceContract: balanceTestnetAddress,
},
"mainnet": {
MorphRPC: []string{
"rpc1.morph.fs.neo.org:40341",
"rpc2.morph.fs.neo.org:40341",
"rpc3.morph.fs.neo.org:40341",
"rpc4.morph.fs.neo.org:40341",
"rpc5.morph.fs.neo.org:40341",
"rpc6.morph.fs.neo.org:40341",
"rpc7.morph.fs.neo.org:40341",
},
RPC: []string{
"rpc1.n3.nspcc.ru:10331",
"rpc2.n3.nspcc.ru:10331",
"rpc3.n3.nspcc.ru:10331",
"rpc4.n3.nspcc.ru:10331",
"rpc5.n3.nspcc.ru:10331",
"rpc6.n3.nspcc.ru:10331",
"rpc7.n3.nspcc.ru:10331",
},
NeoFSContract: neofsMainnetAddress,
BalanceContract: balanceMainnetAddress,
},
}


@ -0,0 +1,433 @@
package storagecfg
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
"time"
netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"github.com/chzyer/readline"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
)
const (
walletFlag = "wallet"
accountFlag = "account"
)
const (
defaultControlEndpoint = "localhost:8090"
defaultDataEndpoint = "localhost"
)
// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
Use: "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
Short: "Section for storage node configuration commands",
Run: storageConfig,
}
func init() {
fs := RootCmd.Flags()
fs.StringP(walletFlag, "w", "", "Path to wallet")
fs.StringP(accountFlag, "a", "", "Wallet account")
}
type config struct {
AnnouncedAddress string
AuthorizedKeys []string
ControlEndpoint string
Endpoint string
TLSCert string
TLSKey string
MorphRPC []string
Attribute struct {
Locode string
}
Wallet struct {
Path string
Account string
Password string
}
Relay bool
BlobstorPath string
MetabasePath string
}
func storageConfig(cmd *cobra.Command, args []string) {
outPath := getOutputPath(args)
historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
readline.SetHistoryPath(historyPath)
var c config
c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
if c.Wallet.Path == "" {
c.Wallet.Path = getPath("Path to the storage node wallet: ")
}
w, err := wallet.NewWalletFromFile(c.Wallet.Path)
fatalOnErr(err)
fillWalletAccount(cmd, &c, w)
accH, err := flags.ParseAddress(c.Wallet.Account)
fatalOnErr(err)
acc := w.GetAccount(accH)
if acc == nil {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
fatalOnErr(err)
c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
network := readNetwork(cmd)
c.MorphRPC = n3config[network].MorphRPC
depositGas(cmd, acc, network)
c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
endpoint := getDefaultEndpoint(cmd, &c)
c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
if c.Endpoint == "" {
c.Endpoint = endpoint
}
c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
if c.ControlEndpoint == "" {
c.ControlEndpoint = defaultControlEndpoint
}
c.TLSCert = getPath("TLS Certificate (optional): ")
if c.TLSCert != "" {
c.TLSKey = getPath("TLS Key: ")
}
c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
if !c.Relay {
p := getPath("Path to the storage directory (all available storage will be used): ")
c.BlobstorPath = filepath.Join(p, "blob")
c.MetabasePath = filepath.Join(p, "meta")
}
out := applyTemplate(c)
fatalOnErr(os.WriteFile(outPath, out, 0o644))
cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}
func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
var addr, port string
for {
c.AnnouncedAddress = getString("Publicly announced address: ")
validator := netutil.Address{}
err := validator.FromString(c.AnnouncedAddress)
if err != nil {
cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
continue
}
uriAddr, err := url.Parse(validator.URIAddr())
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
addr = uriAddr.Hostname()
port = uriAddr.Port()
ip, err := net.ResolveIPAddr("ip", addr)
if err != nil {
cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
continue
}
if !ip.IP.IsGlobalUnicast() {
cmd.Println("IP must be global unicast.")
continue
}
cmd.Printf("Resolved IP address: %s\n", ip.String())
_, err = strconv.ParseUint(port, 10, 16)
if err != nil {
cmd.Println("Port must be an integer.")
continue
}
break
}
return net.JoinHostPort(defaultDataEndpoint, port)
}
func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
if c.Wallet.Account == "" {
addr := address.Uint160ToString(w.GetChangeAddress())
c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
if c.Wallet.Account == "" {
c.Wallet.Account = addr
}
}
}
func readNetwork(cmd *cobra.Command) string {
var network string
for {
network = getString("Choose network [mainnet]/testnet: ")
switch network {
case "":
network = "mainnet"
case "testnet", "mainnet":
default:
cmd.Println(`Network must be either "mainnet" or "testnet"`)
continue
}
break
}
return network
}
func getOutputPath(args []string) string {
if len(args) != 0 {
return args[0]
}
outPath := getPath("File to write config at [./config.yml]: ")
if outPath == "" {
outPath = "./config.yml"
}
return outPath
}
func getWalletAccount(w *wallet.Wallet, prompt string) string {
addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
for i := range w.Accounts {
addrs[i] = readline.PcItem(w.Accounts[i].Address)
}
readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
defer readline.SetAutoComplete(nil)
s, err := readline.Line(prompt)
fatalOnErr(err)
return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
}
func getString(prompt string) string {
s, err := readline.Line(prompt)
fatalOnErr(err)
if s != "" {
_ = readline.AddHistory(s)
}
return s
}
type filenameCompleter struct{}
func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
prefix := string(line[:pos])
dir := filepath.Dir(prefix)
de, err := os.ReadDir(dir)
if err != nil {
return nil, 0
}
for i := range de {
name := filepath.Join(dir, de[i].Name())
if strings.HasPrefix(name, prefix) {
tail := []rune(strings.TrimPrefix(name, prefix))
if de[i].IsDir() {
tail = append(tail, filepath.Separator)
}
newLine = append(newLine, tail)
}
}
if pos != 0 {
return newLine, pos - len([]rune(dir))
}
return newLine, 0
}
func getPath(prompt string) string {
readline.SetAutoComplete(filenameCompleter{})
defer readline.SetAutoComplete(nil)
p, err := readline.Line(prompt)
fatalOnErr(err)
if p == "" {
return p
}
_ = readline.AddHistory(p)
abs, err := filepath.Abs(p)
if err != nil {
fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
}
return abs
}
func getConfirmation(def bool, prompt string) bool {
for {
s, err := readline.Line(prompt)
fatalOnErr(err)
switch strings.ToLower(s) {
case "y", "yes":
return true
case "n", "no":
return false
default:
if len(s) == 0 {
return def
}
}
}
}
func applyTemplate(c config) []byte {
tmpl, err := template.New("config").Parse(configTemplate)
fatalOnErr(err)
b := bytes.NewBuffer(nil)
fatalOnErr(tmpl.Execute(b, c))
return b.Bytes()
}
func fatalOnErr(err error) {
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}
func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
sideClient := initClient(n3config[network].MorphRPC)
balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
sideActor, err := actor.NewSimple(sideClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
}
sideGas := nep17.NewReader(sideActor, balanceHash)
accSH := acc.Contract.ScriptHash()
balance, err := sideGas.BalanceOf(accSH)
if err != nil {
fatalOnErr(fmt.Errorf("side chain balance: %w", err))
}
ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
fixedn.ToString(balance, 12)))
if !ok {
return
}
amountStr := getString("Enter amount in GAS: ")
amount, err := fixedn.FromString(amountStr, 8)
if err != nil {
fatalOnErr(fmt.Errorf("invalid amount: %w", err))
}
mainClient := initClient(n3config[network].RPC)
neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
mainActor, err := actor.NewSimple(mainClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
}
mainGas := nep17.New(mainActor, gas.Hash)
txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
if err != nil {
fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
}
cmd.Print("Waiting for transactions to persist.")
tick := time.NewTicker(time.Second / 2)
defer tick.Stop()
timer := time.NewTimer(time.Second * 20)
defer timer.Stop()
at := trigger.Application
loop:
for {
select {
case <-tick.C:
_, err := mainClient.GetApplicationLog(txHash, &at)
if err == nil {
cmd.Print("\n")
break loop
}
cmd.Print(".")
case <-timer.C:
cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
if getConfirmation(false, "Continue configuration? yes/[no]: ") {
return
}
os.Exit(1)
}
}
}
func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {
c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
DialTimeout: time.Second * 2,
RequestTimeout: time.Second * 5,
})
if err != nil {
continue
}
if err = c.Init(); err != nil {
continue
}
return c
}
fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
panic("unreachable")
}


@ -9,6 +9,7 @@ import (
"io"
"os"
"slices"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@ -76,7 +77,9 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
slices.SortFunc(list, cid.ID.Cmp)
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
})
return list
}
@ -684,7 +687,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err)
}
slices.SortFunc(list, oid.ID.Cmp)
slices.SortFunc(list, func(a, b oid.ID) int {
return strings.Compare(a.EncodeToString(), b.EncodeToString())
})
return &SearchObjectsRes{
ids: list,


@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
prmDial := client.PrmDial{
Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
},


@ -28,7 +28,7 @@ const (
RPC = "rpc-endpoint"
RPCShorthand = "r"
RPCDefault = ""
RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"
RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')"
Timeout = "timeout"
TimeoutShorthand = "t"


@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write([]byte("#\tName\tType\n"))
for i, t := range targets {
_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
}
_ = tw.Flush()
cmd.Print(buf.String())


@ -1,117 +0,0 @@
package control
import (
"bytes"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
const (
FullInfoFlag = "full"
FullInfoFlagUsage = "Print full ShardInfo."
)
var locateObjectCmd = &cobra.Command{
Use: "locate-object",
Short: "List shards storing the object",
Long: "List shards storing the object",
Run: locateObject,
}
func initControlLocateObjectCmd() {
initControlFlags(locateObjectCmd)
flags := locateObjectCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
}
func locateObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
_ = object.ReadObjectAddress(cmd, &cnr, &obj)
pk := key.Get(cmd)
body := new(control.ListShardsForObjectRequest_Body)
body.SetContainerId(cnr.EncodeToString())
body.SetObjectId(obj.EncodeToString())
req := new(control.ListShardsForObjectRequest)
req.SetBody(body)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var err error
var resp *control.ListShardsForObjectResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.ListShardsForObject(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
shardIDs := resp.GetBody().GetShard_ID()
isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
if !isFull {
for _, id := range shardIDs {
cmd.Println(base58.Encode(id))
}
return
}
// get full shard info
listShardsReq := new(control.ListShardsRequest)
listShardsReq.SetBody(new(control.ListShardsRequest_Body))
signRequest(cmd, pk, listShardsReq)
var listShardsResp *control.ListShardsResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
listShardsResp, err = control.ListShards(client, listShardsReq)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
shards := listShardsResp.GetBody().GetShards()
sortShardsByID(shards)
shards = filterShards(shards, shardIDs)
isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
if isJSON {
prettyPrintShardsJSON(cmd, shards)
} else {
prettyPrintShards(cmd, shards)
}
}
func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
var res []control.ShardInfo
for _, id := range ids {
for _, inf := range info {
if bytes.Equal(inf.Shard_ID, id) {
res = append(res, inf)
}
}
}
return res
}


@ -39,7 +39,6 @@ func init() {
listRulesCmd,
getRuleCmd,
listTargetsCmd,
locateObjectCmd,
)
initControlHealthCheckCmd()
@ -53,5 +52,4 @@ func init() {
initControlListRulesCmd()
initControGetRuleCmd()
initControlListTargetsCmd()
initControlLocateObjectCmd()
}


@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
var sealWritecacheShardCmd = &cobra.Command{
Use: "seal",
Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
Run: sealWritecache,
}


@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
objAddr = ReadObjectAddress(cmd, &cnr, &obj)
objAddr = readObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)


@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename)


@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)


@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)


@ -101,7 +101,7 @@ func initObjectNodesCmd() {
func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID
var objID oid.ID
ReadObjectAddress(cmd, &cnrID, &objID)
readObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)


@ -56,7 +56,7 @@ func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err)


@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)


@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs
}
func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)


@ -33,13 +33,12 @@ func _client() (tree.TreeServiceClient, error) {
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
tracing.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
tracing.NewStreamClientInterceptor(),
),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
}
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {


@ -9,7 +9,6 @@ import (
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@ -39,14 +38,13 @@ func reloadConfig() error {
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled"))
var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
log.Reload(logPrm)
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
return nil
return logPrm.Reload()
}
func watchForSignal(ctx context.Context, cancel func()) {


@ -31,6 +31,7 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent
@ -69,7 +70,6 @@ func main() {
metrics := irMetrics.NewInnerRingMetrics()
var logPrm logger.Prm
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)


@ -2,17 +2,13 @@ package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@ -31,11 +27,6 @@ Available search filters:
var initialPrompt string
var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
2: schema.MetabaseParserV2,
3: schema.MetabaseParserV3,
}
func init() {
common.AddComponentPathFlag(tuiCMD, &vPath)
@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
}
defer db.Close()
schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
if !hasVersion {
return errors.New("couldn't detect schema version")
}
metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
if !ok {
return fmt.Errorf("unknown schema version %d", schemaVersion)
}
// Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
app := tview.NewApplication()
ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
var (
shardInfoBucket = []byte{5}
versionRecord = []byte("version")
)
func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
err := db.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(shardInfoBucket)
if bkt == nil {
return nil
}
rec := bkt.Get(versionRecord)
if rec == nil {
return nil
}
version = binary.LittleEndian.Uint64(rec)
ok = true
return nil
})
if err != nil {
common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
}
return
}


@ -80,15 +80,10 @@ var (
},
)
UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
UserAttributeParser = NewUserAttributeKeyBucketParser(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
)
UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
)
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: StrictResolver,
@ -113,14 +108,4 @@ var (
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
cidResolver: LenientResolver,
oidResolver: LenientResolver,
})
ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
)


@ -22,31 +22,27 @@ const (
Split
ContainerCounters
ECInfo
ExpirationEpochToObject
ObjectToExpirationEpoch
)
var x = map[Prefix]string{
Graveyard: "Graveyard",
Garbage: "Garbage",
ToMoveIt: "To Move It",
ContainerVolume: "Container Volume",
Locked: "Locked",
ShardInfo: "Shard Info",
Primary: "Primary",
Lockers: "Lockers",
Tombstone: "Tombstone",
Small: "Small",
Root: "Root",
Owner: "Owner",
UserAttribute: "User Attribute",
PayloadHash: "Payload Hash",
Parent: "Parent",
Split: "Split",
ContainerCounters: "Container Counters",
ECInfo: "EC Info",
ExpirationEpochToObject: "Exp. Epoch to Object",
ObjectToExpirationEpoch: "Object to Exp. Epoch",
Graveyard: "Graveyard",
Garbage: "Garbage",
ToMoveIt: "To Move It",
ContainerVolume: "Container Volume",
Locked: "Locked",
ShardInfo: "Shard Info",
Primary: "Primary",
Lockers: "Lockers",
Tombstone: "Tombstone",
Small: "Small",
Root: "Root",
Owner: "Owner",
UserAttribute: "User Attribute",
PayloadHash: "Payload Hash",
Parent: "Parent",
Split: "Split",
ContainerCounters: "Container Counters",
ECInfo: "EC Info",
}
func (p Prefix) String() string {


@ -9,7 +9,7 @@ import (
func (b *PrefixBucket) String() string {
return common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
)
}
@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
return fmt.Sprintf(
"%s CID %s",
common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(b.id.String(), tcell.ColorAqua),
)
@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
func (b *UserAttributeKeyBucket) String() string {
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,


@ -2,7 +2,6 @@ package buckets
import (
"errors"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -58,11 +57,10 @@ var (
)
var (
ErrNotBucket = errors.New("not a bucket")
ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix")
ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
ErrNotBucket = errors.New("not a bucket")
ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix")
)
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
}
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
}
func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil {
return nil, nil, ErrNotBucket
@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
return nil, nil, err
}
b.key = string(key[33:])
if len(keys) != 0 && !slices.Contains(keys, b.key) {
return nil, nil, ErrUnexpectedAttributeKey
}
return &b, next, nil
}
}


@ -5,30 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
)
var MetabaseParserV3 = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
buckets.ContainerVolumeParser,
buckets.LockedParser,
buckets.ShardInfoParser,
buckets.PrimaryParser,
buckets.LockersParser,
buckets.TombstoneParser,
buckets.SmallParser,
buckets.RootParser,
buckets.UserAttributeParserV3,
buckets.ParentParser,
buckets.SplitParser,
buckets.ContainerCountersParser,
buckets.ECInfoParser,
buckets.ExpirationEpochToObjectParser,
buckets.ObjectToExpirationEpochParser,
),
common.RawParser.ToFallbackParser(),
)
var MetabaseParserV2 = common.WithFallback(
var MetabaseParser = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
buckets.SmallParser,
buckets.RootParser,
buckets.OwnerParser,
buckets.UserAttributeParserV2,
buckets.UserAttributeParser,
buckets.PayloadHashParser,
buckets.ParentParser,
buckets.SplitParser,


@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ExpirationEpochToObjectRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ObjectToExpirationEpochRecord) DetailedString() string {
return spew.Sdump(*r)
}


@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No
}
}
func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "cid":
id := val.(cid.ID)
return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}
func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}


@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
}
return &r, nil, nil
}
func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 72 {
return nil, nil, ErrInvalidKeyLength
}
var (
r ExpirationEpochToObjectRecord
err error
)
r.epoch = binary.BigEndian.Uint64(key[:8])
if err = r.cnt.Decode(key[8:40]); err != nil {
return nil, nil, err
}
if err = r.obj.Decode(key[40:]); err != nil {
return nil, nil, err
}
return &r, nil, nil
}
func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 32 {
return nil, nil, ErrInvalidKeyLength
}
if len(value) != 8 {
return nil, nil, ErrInvalidValueLength
}
var (
r ObjectToExpirationEpochRecord
err error
)
if err = r.obj.Decode(key); err != nil {
return nil, nil, err
}
r.epoch = binary.LittleEndian.Uint64(value)
return &r, nil, nil
}


@ -2,7 +2,6 @@ package records
import (
"fmt"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"github.com/gdamore/tcell/v2"
@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
len(r.ids),
)
}
func (r *ExpirationEpochToObjectRecord) String() string {
return fmt.Sprintf(
"exp. epoch %s %c CID %s OID %s",
common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
)
}
func (r *ObjectToExpirationEpochRecord) String() string {
return fmt.Sprintf(
"OID %s %c exp. epoch %s",
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
)
}


@ -79,15 +79,4 @@ type (
id oid.ID
ids []oid.ID
}
ExpirationEpochToObjectRecord struct {
epoch uint64
cnt cid.ID
obj oid.ID
}
ObjectToExpirationEpochRecord struct {
obj oid.ID
epoch uint64
}
)


@ -1,8 +1,6 @@
package tui
import (
"slices"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
// Used history data for search prompt, so just make that data recent.
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
f.history = append(f.history, s)
}


@ -33,7 +33,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@ -70,7 +69,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -117,6 +115,7 @@ type applicationConfiguration struct {
EngineCfg struct {
errorThreshold uint32
shardPoolSize uint32
shards []shardCfg
lowMem bool
}
@ -135,7 +134,6 @@ type shardCfg struct {
refillMetabase bool
refillMetabaseWorkersCount int
mode shardmode.Mode
limiter qos.Limiter
metaCfg struct {
path string
@ -249,47 +247,45 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
var target shardCfg
func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
var newConfig shardCfg
target.refillMetabase = source.RefillMetabase()
target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
target.mode = source.Mode()
target.compress = source.Compress()
target.estimateCompressibility = source.EstimateCompressibility()
target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold()
target.uncompressableContentType = source.UncompressableContentTypes()
target.smallSizeObjectLimit = source.SmallSizeLimit()
newConfig.refillMetabase = oldConfig.RefillMetabase()
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
newConfig.mode = oldConfig.Mode()
newConfig.compress = oldConfig.Compress()
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
a.setShardWriteCacheConfig(&target, source)
a.setShardWriteCacheConfig(&newConfig, oldConfig)
a.setShardPiloramaConfig(c, &target, source)
a.setShardPiloramaConfig(c, &newConfig, oldConfig)
if err := a.setShardStorageConfig(&target, source); err != nil {
if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
return err
}
a.setMetabaseConfig(&target, source)
a.setMetabaseConfig(&newConfig, oldConfig)
a.setGCConfig(&target, source)
if err := a.setLimiter(&target, source); err != nil {
return err
}
a.setGCConfig(&newConfig, oldConfig)
a.EngineCfg.shards = append(a.EngineCfg.shards, target)
a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
return nil
}
func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
writeCacheCfg := source.WriteCache()
func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
writeCacheCfg := oldConfig.WriteCache()
if writeCacheCfg.Enabled() {
wc := &target.writecacheCfg
wc := &newConfig.writecacheCfg
wc.enabled = true
wc.path = writeCacheCfg.Path()
@ -302,10 +298,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
}
}
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") {
piloramaCfg := source.Pilorama()
pr := &target.piloramaCfg
piloramaCfg := oldConfig.Pilorama()
pr := &newConfig.piloramaCfg
pr.enabled = true
pr.path = piloramaCfg.Path()
@ -316,8 +312,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
}
}
func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
blobStorCfg := source.BlobStor()
func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
blobStorCfg := oldConfig.BlobStor()
storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg))
@ -351,13 +347,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
ss = append(ss, sCfg)
}
target.subStorages = ss
newConfig.subStorages = ss
return nil
}
func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
metabaseCfg := source.Metabase()
m := &target.metaCfg
func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
metabaseCfg := oldConfig.Metabase()
m := &newConfig.metaCfg
m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm()
@ -365,25 +361,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
}
func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
gcCfg := source.GC()
target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
limitsConfig := source.Limits()
limiter, err := qos.NewLimiter(limitsConfig)
if err != nil {
return err
}
if target.limiter != nil {
target.limiter.Close()
}
target.limiter = limiter
return nil
func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
gcCfg := oldConfig.GC()
newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
// internals contains application-specific internals that are created
@ -473,6 +456,7 @@ type shared struct {
// dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations.
type dynamicConfiguration struct {
logger *logger.Prm
pprof *httpComponent
metrics *httpComponent
}
@ -544,8 +528,6 @@ type cfgGRPC struct {
maxChunkSize uint64
maxAddrAmount uint64
reconnectTimeout time.Duration
limiter atomic.Pointer[limiting.SemaphoreLimiter]
}
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@ -682,6 +664,10 @@ type cfgAccessPolicyEngine struct {
}
type cfgObjectRoutines struct {
putRemote *ants.Pool
putLocal *ants.Pool
replication *ants.Pool
}
@ -713,8 +699,7 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm := c.loggerPrm()
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
@ -867,14 +852,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
}
func initCfgGRPC() (cfg cfgGRPC) {
func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
cfg.maxChunkSize = maxChunkSize
cfg.maxAddrAmount = maxAddrAmount
return
return cfgGRPC{
maxChunkSize: maxChunkSize,
maxAddrAmount: maxAddrAmount,
}
}
func initCfgObject(appCfg *config.Config) cfgObject {
@ -891,6 +876,7 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option
opts = append(opts,
engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
@ -930,7 +916,6 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync),
writecache.WithLogger(c.log),
writecache.WithQoSLimiter(shCfg.limiter),
)
}
return writeCacheOpts
@ -1046,7 +1031,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
}
if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
}
var sh shardOptsWithID
@ -1071,27 +1055,30 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return pool
}),
shard.WithLimiter(shCfg.limiter),
}
return sh
}
func (c *cfg) loggerPrm() (logger.Prm, error) {
var prm logger.Prm
// (re)init read configuration
err := prm.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
func (c *cfg) loggerPrm() *logger.Prm {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
}
err = prm.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
}
prm.PrependTimestamp = c.LoggerCfg.timestamp
return prm, nil
// (re)init read configuration
err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log level format: " + c.LoggerCfg.level)
}
err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
}
func (c *cfg) LocalAddress() network.AddressGroup {
@ -1179,7 +1166,21 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error
optNonBlocking := ants.WithNonblocking(true)
putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
fatalOnErr(err)
putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
fatalOnErr(err)
replicatorPoolSize := replicatorconfig.PoolSize(cfg)
if replicatorPoolSize <= 0 {
replicatorPoolSize = putRemoteCapacity
}
pool.replication, err = ants.NewPool(replicatorPoolSize)
fatalOnErr(err)
@ -1331,7 +1332,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support
// Logger's dynamic reconfiguration approach
components := c.getComponents(ctx)
// Logger
logPrm := c.loggerPrm()
components := c.getComponents(ctx, logPrm)
// Object
c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@ -1369,17 +1374,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) getComponents(ctx context.Context) []dCmp {
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
var components []dCmp
components = append(components, dCmp{"logger", func() error {
prm, err := c.loggerPrm()
if err != nil {
return err
}
c.log.Reload(prm)
return nil
}})
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(ctx, c)
return nil
@ -1412,13 +1410,17 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
return components
}
func (c *cfg) reloadPools() error {
newSize := replicatorconfig.PoolSize(c.appCfg)
newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
newSize = replicatorconfig.PoolSize(c.appCfg)
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
return nil

View file

@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) {
dir := t.TempDir()
cfgFileName := path.Join(dir, "cfg_01.yml")
cfgFileName0 := path.Join(dir, "cfg_00.json")
cfgFileName1 := path.Join(dir, "cfg_01.yml")
require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}

View file

@ -11,6 +11,10 @@ import (
const (
subsection = "storage"
// ShardPoolSizeDefault is a default value of routine pool size per-shard to
// process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil
}
// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
//
// Returns ShardPoolSizeDefault if the value is not a positive number.
func ShardPoolSize(c *config.Config) uint32 {
v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
if v > 0 {
return v
}
return ShardPoolSizeDefault
}
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
//
// Returns 0 if the value is missing.

View file

@ -11,7 +11,6 @@ import (
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@ -54,6 +53,7 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
})
@ -63,6 +63,7 @@ func TestEngineSection(t *testing.T) {
num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@ -75,7 +76,6 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages()
pl := sc.Pilorama()
gc := sc.GC()
limits := sc.Limits()
switch num {
case 0:
@ -134,75 +134,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
require.ElementsMatch(t, readLimits.Tags,
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(20),
ReservedOps: toPtr(1000),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(70),
ReservedOps: toPtr(10000),
},
{
Tag: "background",
Weight: toPtr(5),
LimitOps: toPtr(10000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
})
require.ElementsMatch(t, writeLimits.Tags,
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(200),
ReservedOps: toPtr(100),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(700),
ReservedOps: toPtr(1000),
},
{
Tag: "background",
Weight: toPtr(50),
LimitOps: toPtr(1000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
{
Tag: "policer",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
})
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@ -257,17 +188,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
require.Equal(t, 0, len(readLimits.Tags))
require.Equal(t, 0, len(writeLimits.Tags))
}
return nil
})
@ -281,7 +201,3 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}
func toPtr(v float64) *float64 {
return &v
}

View file

@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0)
if d < 0 {
d = 0
}
return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0)
if s < 0 {
s = 0
}
return s
}
// NoSync returns the value of "no_sync" config parameter.
@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
return max(s, 0)
if s < 0 {
s = 0
}
return s
}

View file

@ -4,7 +4,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@ -126,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config {
)
}
// Limits returns "limits" subsection as a limitsconfig.Config.
func (x *Config) Limits() *limitsconfig.Config {
return limitsconfig.From(
(*config.Config)(x).
Sub("limits"),
)
}
// RefillMetabase returns the value of "resync_metabase" config parameter.
//
// Returns false if the value is not a valid bool.

View file

@ -1,130 +0,0 @@
package limits
import (
"math"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/spf13/cast"
)
const (
NoLimit int64 = math.MaxInt64
DefaultIdleTimeout = 5 * time.Minute
)
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
}
// Config is a wrapper over the config section
// which provides access to Shard's limits configurations.
type Config config.Config
// Read returns the value of "read" limits config section.
func (x *Config) Read() OpConfig {
return x.parse("read")
}
// Write returns the value of "write" limits config section.
func (x *Config) Write() OpConfig {
return x.parse("write")
}
func (x *Config) parse(sub string) OpConfig {
c := (*config.Config)(x).Sub(sub)
var result OpConfig
if s := config.Int(c, "max_waiting_ops"); s > 0 {
result.MaxWaitingOps = s
} else {
result.MaxWaitingOps = NoLimit
}
if s := config.Int(c, "max_running_ops"); s > 0 {
result.MaxRunningOps = s
} else {
result.MaxRunningOps = NoLimit
}
if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
result.IdleTimeout = s
} else {
result.IdleTimeout = DefaultIdleTimeout
}
result.Tags = tags(c)
return result
}
type OpConfig struct {
// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxWaitingOps int64
// MaxRunningOps returns the value of "max_running_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxRunningOps int64
// IdleTimeout returns the value of "idle_timeout" config parameter.
//
// Equals DefaultIdleTimeout if the value is not a valid duration.
IdleTimeout time.Duration
// Tags returns the value of "tags" config parameter.
//
// Equals nil if the value is not a valid tags config slice.
Tags []IOTagConfig
}
type IOTagConfig struct {
Tag string
Weight *float64
LimitOps *float64
ReservedOps *float64
}
func tags(c *config.Config) []IOTagConfig {
c = c.Sub("tags")
var result []IOTagConfig
for i := 0; ; i++ {
tag := config.String(c, strconv.Itoa(i)+".tag")
if tag == "" {
return result
}
var tagConfig IOTagConfig
tagConfig.Tag = tag
v := c.Value(strconv.Itoa(i) + ".weight")
if v != nil {
w, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.Weight = &w
}
v = c.Value(strconv.Itoa(i) + ".limit_ops")
if v != nil {
l, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.LimitOps = &l
}
v = c.Value(strconv.Itoa(i) + ".reserved_ops")
if v != nil {
r, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.ReservedOps = &r
}
result = append(result, tagConfig)
}
}
func panicOnErr(err error) {
if err != nil {
panic(err)
}
}

View file

@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0)
if d <= 0 {
d = 0
}
return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0)
if s <= 0 {
s = 0
}
return s
}

View file

@ -21,6 +21,10 @@ const (
putSubsection = "put"
getSubsection = "get"
// PutPoolSizeDefault is a default value of routine pool size to
// process object.Put requests in object service.
PutPoolSizeDefault = 10
)
// Put returns structure that provides access to "put" subsection of
@ -31,6 +35,30 @@ func Put(c *config.Config) PutConfig {
}
}
// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
//
// Returns PutPoolSizeDefault if the value is not a positive number.
func (g PutConfig) PoolSizeRemote() int {
v := config.Int(g.cfg, "remote_pool_size")
if v > 0 {
return int(v)
}
return PutPoolSizeDefault
}
// PoolSizeLocal returns the value of "local_pool_size" config parameter.
//
// Returns PutPoolSizeDefault if the value is not a positive number.
func (g PutConfig) PoolSizeLocal() int {
v := config.Int(g.cfg, "local_pool_size")
if v > 0 {
return int(v)
}
return PutPoolSizeDefault
}
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")

View file

@ -13,6 +13,8 @@ func TestObjectSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
})
@ -20,6 +22,8 @@ func TestObjectSection(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
}

View file

@ -11,8 +11,6 @@ const (
// PutTimeoutDefault is a default timeout of object put request in replicator.
PutTimeoutDefault = 5 * time.Second
// PoolSizeDefault is a default pool size for put request in replicator.
PoolSizeDefault = 10
)
// PutTimeout returns the value of "put_timeout" config parameter
@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration {
// PoolSize returns the value of "pool_size" config parameter
// from "replicator" section.
//
// Returns PoolSizeDefault if the value is non-positive integer.
func PoolSize(c *config.Config) int {
v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
if v > 0 {
return v
}
return PoolSizeDefault
return int(config.IntSafe(c.Sub(subsection), "pool_size"))
}

View file

@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
require.Equal(t, 0, replicatorconfig.PoolSize(empty))
})
const path = "../../../../config/example/node"

View file

@ -1,42 +0,0 @@
package rpcconfig
import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
const (
subsection = "rpc"
limitsSubsection = "limits"
)
type LimitConfig struct {
Methods []string
MaxOps int64
}
// Limits returns the "limits" config from "rpc" section.
func Limits(c *config.Config) []LimitConfig {
c = c.Sub(subsection).Sub(limitsSubsection)
var limits []LimitConfig
for i := uint64(0); ; i++ {
si := strconv.FormatUint(i, 10)
sc := c.Sub(si)
methods := config.StringSliceSafe(sc, "methods")
if len(methods) == 0 {
break
}
if sc.Value("max_ops") == nil {
panic("no max operations for method group")
}
limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
}
return limits
}

View file

@ -1,77 +0,0 @@
package rpcconfig
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestRPCSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
require.Empty(t, Limits(configtest.EmptyConfig()))
})
t.Run("correct config", func(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(1000))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
t.Run("no max operations", func(t *testing.T) {
const path = "testdata/no_max_ops"
fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) })
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
t.Run("zero max operations", func(t *testing.T) {
const path = "testdata/zero_max_ops"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(0))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
}

View file

@ -1,3 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

View file

@ -1,18 +0,0 @@
{
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
]
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
}
}

View file

@ -1,8 +0,0 @@
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000

View file

@ -1,4 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=0
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

View file

@ -1,19 +0,0 @@
{
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
],
"max_ops": 0
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
}
}

View file

@ -1,9 +0,0 @@
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 0
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000

View file

@ -4,18 +4,14 @@ import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"time"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
"google.golang.org/grpc"
@ -138,13 +134,11 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
qos.NewUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
grpc.ChainStreamInterceptor(
qos.NewStreamServerInterceptor(),
metrics.NewStreamServerInterceptor(),
tracing.NewStreamServerInterceptor(),
qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
}
@ -233,54 +227,3 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger
l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
func initRPCLimiter(c *cfg) error {
var limits []limiting.KeyLimit
for _, l := range rpcconfig.Limits(c.appCfg) {
limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
}
if err := validateRPCLimits(c, limits); err != nil {
return fmt.Errorf("validate RPC limits: %w", err)
}
limiter, err := limiting.NewSemaphoreLimiter(limits)
if err != nil {
return fmt.Errorf("create RPC limiter: %w", err)
}
c.cfgGRPC.limiter.Store(limiter)
return nil
}
func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
availableMethods := getAvailableMethods(c.cfgGRPC.servers)
for _, limit := range limits {
for _, method := range limit.Keys {
if _, ok := availableMethods[method]; !ok {
return fmt.Errorf("set limit on an unknown method %q", method)
}
}
}
return nil
}
func getAvailableMethods(servers []grpcServer) map[string]struct{} {
res := make(map[string]struct{})
for _, server := range servers {
for _, method := range getMethodsForServer(server.Server) {
res[method] = struct{}{}
}
}
return res
}
func getMethodsForServer(server *grpc.Server) []string {
var res []string
for service, info := range server.GetServiceInfo() {
for _, method := range info.Methods {
res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
}
}
return res
}

View file

@ -117,8 +117,6 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(ctx, c, "apemanager", initAPEManagerService)
initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}

View file

@ -16,6 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@ -171,10 +172,12 @@ func initObjectService(c *cfg) {
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
apeSvc := createAPEService(c, &irFetcher, splitSvc)
apeSvc := createAPEService(c, splitSvc)
aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
var commonSvc objectService.Common
commonSvc.Init(&c.internals, apeSvc)
commonSvc.Init(&c.internals, aclSvc)
respSvc := objectService.NewResponseService(
&commonSvc,
@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
})
}
func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
@ -323,6 +326,7 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
objectwriter.WithLogger(c.log),
objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
@ -426,7 +430,17 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
)
}
func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
return v2.New(
apeSvc,
c.netMapSource,
irFetcher,
c.cfgObject.cnrSource,
v2.WithLogger(c.log),
)
}
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
@ -438,7 +452,6 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
c.cfgObject.cnrSource,
c.binPublicKey,
),
objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
splitSvc,
)
}
@ -451,7 +464,7 @@ func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Ad
return e.engine.IsLocked(ctx, address)
}
func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID, expEpoch uint64) error {
var prm engine.InhumePrm
addrs := make([]oid.Address, len(toDelete))
@ -460,7 +473,7 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
addrs[i].SetObject(toDelete[i])
}
prm.WithTarget(tombstone, addrs...)
prm.WithTarget(tombstone, expEpoch, addrs...)
return e.engine.Inhume(ctx, prm)
}

View file

@ -47,7 +47,7 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
}
ioTag, err := qos.FromRawString(rawTag)
if err != nil {
s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
@ -70,7 +70,6 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
for _, pk := range s.allowedInternalPubs {
@ -88,10 +87,9 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default:
s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}

View file

@ -1,6 +1,7 @@
package main
import (
"os"
"path/filepath"
"testing"
@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
require.NoError(t, err)
})
})
t.Run("mainnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
t.Run("testnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
}

View file

@ -87,16 +87,14 @@ FROSTFS_REPLICATOR_POOL_SIZE=10
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor
@ -156,47 +154,6 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
#### Limit of concurrent workers collecting expired objects by the garbage collector
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
#### Limits config
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
## 1 shard
### Flag to refill Metabase from BlobStor

View file

@ -134,30 +134,16 @@
"tombstone_lifetime": 10
},
"put": {
"remote_pool_size": 100,
"local_pool_size": 200,
"skip_session_token_issuer_verification": true
},
"get": {
"priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
}
},
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
],
"max_ops": 1000
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
},
"storage": {
"shard_pool_size": 15,
"shard_ro_error_threshold": 100,
"shard": {
"0": {
@ -220,76 +206,6 @@
"remover_sleep_interval": "2m",
"expired_collector_batch_size": 1500,
"expired_collector_worker_count": 15
},
"limits": {
"read": {
"max_running_ops": 10000,
"max_waiting_ops": 1000,
"idle_timeout": "30s",
"tags": [
{
"tag": "internal",
"weight": 20,
"limit_ops": 0,
"reserved_ops": 1000
},
{
"tag": "client",
"weight": 70,
"reserved_ops": 10000
},
{
"tag": "background",
"weight": 5,
"limit_ops": 10000,
"reserved_ops": 0
},
{
"tag": "writecache",
"weight": 5,
"limit_ops": 25000
},
{
"tag": "policer",
"weight": 5,
"limit_ops": 25000
}
]
},
"write": {
"max_running_ops": 1000,
"max_waiting_ops": 100,
"idle_timeout": "45s",
"tags": [
{
"tag": "internal",
"weight": 200,
"limit_ops": 0,
"reserved_ops": 100
},
{
"tag": "client",
"weight": 700,
"reserved_ops": 1000
},
{
"tag": "background",
"weight": 50,
"limit_ops": 1000,
"reserved_ops": 0
},
{
"tag": "writecache",
"weight": 50,
"limit_ops": 2500
},
{
"tag": "policer",
"weight": 50,
"limit_ops": 2500
}
]
}
}
},
"1": {

View file

@ -117,24 +117,17 @@ object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
put:
remote_pool_size: 100 # number of async workers for remote PUT operations
local_pool_size: 200 # number of async workers for local PUT operations
skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
get:
priority: # list of metrics of nodes for prioritization
- $attribute:ClusterName
- $attribute:UN-LOCODE
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 1000
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000
storage:
# note: shard configuration can be omitted for relay node (see `node.relay`)
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard:
@ -226,52 +219,6 @@ storage:
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
limits:
read:
max_running_ops: 10000
max_waiting_ops: 1000
idle_timeout: 30s
tags:
- tag: internal
weight: 20
limit_ops: 0
reserved_ops: 1000
- tag: client
weight: 70
reserved_ops: 10000
- tag: background
weight: 5
limit_ops: 10000
reserved_ops: 0
- tag: writecache
weight: 5
limit_ops: 25000
- tag: policer
weight: 5
limit_ops: 25000
write:
max_running_ops: 1000
max_waiting_ops: 100
idle_timeout: 45s
tags:
- tag: internal
weight: 200
limit_ops: 0
reserved_ops: 100
- tag: client
weight: 700
reserved_ops: 1000
- tag: background
weight: 50
limit_ops: 1000
reserved_ops: 0
- tag: writecache
weight: 50
limit_ops: 2500
- tag: policer
weight: 50
limit_ops: 2500
1:
writecache:
path: tmp/1/cache # write-cache root directory

28
config/mainnet/README.md Normal file
View file

@ -0,0 +1,28 @@
# N3 Mainnet Storage node configuration
Here is a template for a simple storage node configuration in the N3 Mainnet.
Make sure to specify correct values instead of the `<...>` placeholders.
Do not change the `contracts` section. Run the latest frostfs-node release with
the fixed config: `frostfs-node -c config.yml`.
To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
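A deposit can be made the same way as in the testnet instructions included in this changeset; the command below is only a sketch, and the RPC endpoint and sender address are placeholders you must replace before running:
```
neo-go wallet nep17 transfer -w wallet.json -r <n3-mainnet-rpc-endpoint> \
       --from <your-N3-address> \
       --to NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk \
       --token GAS \
       --amount 1
```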
## Tips
Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server.
```yaml
node:
addresses:
- grpcs://frostfs.my.org:8080
grpc:
num: 1
0:
endpoint: frostfs.my.org:8080
tls:
enabled: true
certificate: /path/to/cert
key: /path/to/key
```

70
config/mainnet/config.yml Normal file
View file

@ -0,0 +1,70 @@
node:
wallet:
path: <path/to/wallet>
address: <address-in-wallet>
password: <password>
addresses:
- <announced.address:port>
attribute_0: UN-LOCODE:<XX YYY>
attribute_1: Price:100000
attribute_2: User-Agent:FrostFS\/0.9999
grpc:
num: 1
0:
endpoint: <listen.local.address:port>
tls:
enabled: false
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/path/metabase
perm: 0600
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m
logger:
level: info
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
object:
put:
remote_pool_size: 100
local_pool_size: 100
morph:
rpc_endpoint:
- wss://rpc1.morph.frostfs.info:40341/ws
- wss://rpc2.morph.frostfs.info:40341/ws
- wss://rpc3.morph.frostfs.info:40341/ws
- wss://rpc4.morph.frostfs.info:40341/ws
- wss://rpc5.morph.frostfs.info:40341/ws
- wss://rpc6.morph.frostfs.info:40341/ws
- wss://rpc7.morph.frostfs.info:40341/ws
dial_timeout: 20s
contracts:
balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1

129
config/testnet/README.md Normal file
View file

@ -0,0 +1,129 @@
# N3 Testnet Storage node configuration
There is a prepared configuration for NeoFS Storage Node deployment in
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
docker image and run it with docker-compose.
## Build image
The prepared **frostfs-storage-testnet** image is available on Docker Hub.
However, if you need to rebuild it for some reason, run the
`make image-storage-testnet` command.
```
$ make image-storage-testnet
...
Successfully built ab0557117b02
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
```
## Deploy node
To run a storage node in the N3 Testnet environment, you should deposit GAS assets,
update the docker-compose file, and start the node.
### Deposit
The Storage Node owner should deposit GAS to the NeoFS smart contract. This generates a
bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send the bootstrap transaction.
First, obtain GAS in the N3 Testnet chain. You can do that with the
[faucet](https://neowish.ngd.network) service.
Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
You can provide a scripthash in the `data` argument of the transfer transaction to make a
deposit to a specified account. Otherwise, the deposit is made to the transaction sender.
The NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
See a deposit example with `neo-go`.
```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
--token GAS \
--amount 1
```
### Configure
Next, configure the `node_config.env` file. Change the endpoint values. Both
should contain your **public** IP.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
```
Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
attribute.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```
You can validate the UN/LOCODE attribute against the
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.
```
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
Country: Russia
Location: Saint Petersburg (ex Leningrad)
Continent: Europe
Subdivision: [SPE] Sankt-Peterburg
Coordinates: 59.53, 30.15
```
It is recommended to pass the node's key as a file. To do so, convert your wallet
WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.
```
// Print WIF in a 32-byte hex format
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
// Save 32-byte hex into a file
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
```
Then, specify the path to this file in `docker-compose.yml`:
```yaml
volumes:
- frostfs_storage:/storage
- ./my_wallet.key:/node.key
```
NeoFS objects will be stored on your machine. By default, docker-compose
is configured to store objects in the named docker volume `frostfs_storage`. You can
specify a directory on the filesystem to store objects in instead.
```yaml
volumes:
- /home/username/frostfs/rc3/storage:/storage
- ./my_wallet.key:/node.key
```
### Start
Run the node with the `docker-compose up` command and stop it with `docker-compose down`.
### Debug
To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
the log, set the log level to debug with this environment variable:
```yaml
environment:
- NEOFS_LOGGER_LEVEL=debug
```

52
config/testnet/config.yml Normal file
View file

@ -0,0 +1,52 @@
logger:
level: info
morph:
rpc_endpoint:
- wss://rpc01.morph.testnet.frostfs.info:51331/ws
- wss://rpc02.morph.testnet.frostfs.info:51331/ws
- wss://rpc03.morph.testnet.frostfs.info:51331/ws
- wss://rpc04.morph.testnet.frostfs.info:51331/ws
- wss://rpc05.morph.testnet.frostfs.info:51331/ws
- wss://rpc06.morph.testnet.frostfs.info:51331/ws
- wss://rpc07.morph.testnet.frostfs.info:51331/ws
dial_timeout: 20s
contracts:
balance: e0420c216003747626670d1424569c17c79015bf
container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
netmap: d4b331639799e2958d4bc5b711b469d79de94e01
node:
key: /node.key
attribute_0: Deployed:SelfHosted
attribute_1: User-Agent:FrostFS\/0.9999
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/metabase
perm: 0777
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m

View file

@ -51,7 +51,10 @@ However, all mode changing operations are idempotent.
## Automatic mode changes
A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
A shard can automatically switch to `degraded-read-only` mode in three cases (see the configuration sketch after this list):
1. The metabase was not available or couldn't be opened/initialized during shard startup.
2. The shard error counter exceeds the threshold.
3. The metabase couldn't be reopened during SIGHUP handling.
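A minimal configuration sketch for the error-counter trigger, assuming the `storage` section layout used in the example config elsewhere in this changeset:
```yaml
storage:
  shard_ro_error_threshold: 100  # after 100 storage errors the shard switches mode automatically
  shard:
    0:
      mode: read-write           # starting mode; the shard may still be degraded automatically
```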
# Detach shard

View file

@ -170,6 +170,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
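A minimal sketch of the corresponding `storage` section; the values are illustrative and mirror the example config in this changeset:
```yaml
storage:
  shard_pool_size: 20            # per-shard worker pool size for PUT operations
  shard_ro_error_threshold: 100  # errors tolerated before the shard leaves read-write mode
  low_mem: false
  shard:
    0:
      # per-shard settings, see the shard subsection
```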
@ -194,7 +195,6 @@ The following table describes configuration for each shard.
| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
| `gc` | [GC config](#gc-subsection) | | GC configuration. |
| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
### `blobstor` subsection
@ -301,64 +301,6 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
### `limits` subsection
```yaml
limits:
max_read_running_ops: 10000
max_read_waiting_ops: 1000
max_write_running_ops: 1000
max_write_waiting_ops: 100
read:
- tag: internal
weight: 20
limit_ops: 0
reserved_ops: 1000
- tag: client
weight: 70
reserved_ops: 10000
- tag: background
weight: 5
limit_ops: 10000
reserved_ops: 0
- tag: writecache
weight: 5
limit_ops: 25000
- tag: policer
weight: 5
limit_ops: 25000
write:
- tag: internal
weight: 200
limit_ops: 0
reserved_ops: 100
- tag: client
weight: 700
reserved_ops: 1000
- tag: background
weight: 50
limit_ops: 1000
reserved_ops: 0
- tag: writecache
weight: 50
limit_ops: 2500
- tag: policer
weight: 50
limit_ops: 2500
```
| Parameter | Type | Default value | Description |
| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of runnig read operations. |
| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
| `read` | `[]tag` | empty | Array of shard read settings for tags. |
| `write` | `[]tag` | empty | Array of shard write settings for tags. |
| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. |
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
# `node` section
@ -454,16 +396,18 @@ replicator:
pool_size: 10
```
| Parameter | Type | Default value | Description |
|---------------|------------|---------------|---------------------------------------------|
| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
| `pool_size` | `int` | `10` | Maximum number of concurrent replications. |
| Parameter | Type | Default value | Description |
|---------------|------------|----------------------------------------|---------------------------------------------|
| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum number of concurrent replications. |
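The two table variants above differ only in the default of `pool_size`: one fixes it at `10`, the other derives it from `object.put.remote_pool_size`. A minimal sketch of the derived-default case, with illustrative values only:
```yaml
object:
  put:
    remote_pool_size: 100   # illustrative value
replicator:
  put_timeout: 5s
  # pool_size omitted: with the derived default it equals
  # object.put.remote_pool_size, i.e. 100 concurrent replications here
```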
# `object` section
Contains object-service related parameters.
```yaml
object:
put:
remote_pool_size: 100
get:
priority:
- $attribute:ClusterName
@ -472,29 +416,10 @@ object:
| Parameter | Type | Default value | Description |
|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------|
| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. |
# `rpc` section
Contains limits on the number of active RPC for specified method(s).
```yaml
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 1000
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000
```
| Parameter | Type | Default value | Description |
|------------------|------------|---------------|--------------------------------------------------------------|
| `limits.max_ops` | `int` | | Maximum number of active RPC allowed for the given method(s) |
| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit |
# `runtime` section
Contains runtime parameters.

12
go.mod
View file

@ -1,18 +1,18 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
go 1.23
go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/VictoriaMetrics/easyproto v0.1.4

20
go.sum
View file

@ -1,25 +1,25 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=

View file

@ -1,9 +0,0 @@
package assert
import "strings"
func True(cond bool, details ...string) {
if !cond {
panic(strings.Join(details, " "))
}
}

View file

@ -125,6 +125,7 @@ const (
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
SearchLocalOperationFailed = "local operation failed"
UtilObjectServiceError = "object service error"
UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
@ -252,7 +253,8 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
ShardCouldNotFindObject = "could not find object"
ShardUnknownObjectTypeWhileIteratingExpiredObjects = "encountered unknown object type while iterating expired objects"
ShardFailedToRemoveExpiredGraves = "failed to remove expired graves"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
@ -513,6 +515,4 @@ const (
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
)

View file

@ -23,7 +23,6 @@ const (
policerSubsystem = "policer"
commonCacheSubsystem = "common_cache"
multinetSubsystem = "multinet"
qosSubsystem = "qos"
successLabel = "success"
shardIDLabel = "shard_id"
@ -44,7 +43,6 @@ const (
hitLabel = "hit"
cacheLabel = "cache"
sourceIPLabel = "source_ip"
ioTagLabel = "io_tag"
readWriteMode = "READ_WRITE"
readOnlyMode = "READ_ONLY"

View file

@ -11,7 +11,7 @@ import (
type GCMetrics interface {
AddRunDuration(shardID string, d time.Duration, success bool)
AddDeletedCount(shardID string, deleted, failed uint64)
AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string)
AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool)
AddInhumedObjectCount(shardID string, count uint64, objectType string)
}
@ -71,11 +71,10 @@ func (m *gcMetrics) AddDeletedCount(shardID string, deleted, failed uint64) {
}).Add(float64(failed))
}
func (m *gcMetrics) AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string) {
func (m *gcMetrics) AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool) {
m.expCollectDuration.With(prometheus.Labels{
shardIDLabel: shardID,
successLabel: strconv.FormatBool(success),
objectTypeLabel: objectType,
shardIDLabel: shardID,
successLabel: strconv.FormatBool(success),
}).Add(d.Seconds())
}

View file

@ -26,7 +26,6 @@ type NodeMetrics struct {
morphCache *morphCacheMetrics
log logger.LogMetrics
multinet *multinetMetrics
qos *QoSMetrics
// nolint: unused
appInfo *ApplicationInfo
}
@ -56,7 +55,6 @@ func NewNodeMetrics() *NodeMetrics {
log: logger.NewLogMetrics(namespace),
appInfo: NewApplicationInfo(misc.Version),
multinet: newMultinetMetrics(namespace),
qos: newQoSMetrics(),
}
}
@ -128,7 +126,3 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
return m.multinet
}
func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
return m.qos
}

View file

@ -9,14 +9,13 @@ import (
)
type ObjectServiceMetrics interface {
AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
AddRequestDuration(method string, d time.Duration, success bool)
AddPayloadSize(method string, size int)
}
type objectServiceMetrics struct {
methodDuration *prometheus.HistogramVec
payloadCounter *prometheus.CounterVec
ioTagOpsCounter *prometheus.CounterVec
methodDuration *prometheus.HistogramVec
payloadCounter *prometheus.CounterVec
}
func newObjectServiceMetrics() *objectServiceMetrics {
@ -33,24 +32,14 @@ func newObjectServiceMetrics() *objectServiceMetrics {
Name: "request_payload_bytes",
Help: "Object Service request payload",
}, []string{methodLabel}),
ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: objectSubsystem,
Name: "requests_total",
Help: "Count of requests for each IO tag",
}, []string{methodLabel, ioTagLabel}),
}
}
func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
m.methodDuration.With(prometheus.Labels{
methodLabel: method,
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
m.ioTagOpsCounter.With(prometheus.Labels{
ioTagLabel: ioTag,
methodLabel: method,
}).Inc()
}
func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {

View file

@ -1,52 +0,0 @@
package metrics
import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
type QoSMetrics struct {
opsCounter *prometheus.GaugeVec
}
func newQoSMetrics() *QoSMetrics {
return &QoSMetrics{
opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: qosSubsystem,
Name: "operations_total",
Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
}
}
func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "pending",
}).Set(float64(pending))
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "in_progress",
}).Set(float64(inProgress))
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "completed",
}).Set(float64(completed))
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "resource_exhausted",
}).Set(float64(resourceExhausted))
}
func (m *QoSMetrics) Close(shardID string) {
m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
}

View file

@ -12,14 +12,12 @@ type TreeMetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
AddOperation(string, string)
}
type treeServiceMetrics struct {
replicateTaskDuration *prometheus.HistogramVec
replicateWaitDuration *prometheus.HistogramVec
syncOpDuration *prometheus.HistogramVec
ioTagOpsCounter *prometheus.CounterVec
}
var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
@ -44,12 +42,6 @@ func newTreeServiceMetrics() *treeServiceMetrics {
Name: "sync_duration_seconds",
Help: "Duration of synchronization operations",
}, []string{successLabel}),
ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: treeServiceSubsystem,
Name: "requests_total",
Help: "Count of requests for each IO tag",
}, []string{methodLabel, ioTagLabel}),
}
}
@ -70,10 +62,3 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
}
func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
m.ioTagOpsCounter.With(prometheus.Labels{
ioTagLabel: ioTag,
methodLabel: op,
}).Inc()
}

View file

@ -3,9 +3,7 @@ package qos
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"google.golang.org/grpc"
)
@ -26,7 +24,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor
if err != nil {
tag = IOTagClient
}
if tag.IsLocal() {
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
@ -44,43 +42,10 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto
if err != nil {
tag = IOTagClient
}
if tag.IsLocal() {
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
return streamer(ctx, desc, cc, method, opts...)
}
}
func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
return handler(ctx, req)
}
release, ok := getLimiter().Acquire(info.FullMethod)
if !ok {
return nil, new(apistatus.ResourceExhausted)
}
defer release()
return handler(ctx, req)
}
}
//nolint:contextcheck (grpc.ServerStream manages the context itself)
func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
return handler(srv, ss)
}
release, ok := getLimiter().Acquire(info.FullMethod)
if !ok {
return new(apistatus.ResourceExhausted)
}
defer release()
return handler(srv, ss)
}
}

View file

@ -1,236 +0,0 @@
package qos
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
const (
defaultIdleTimeout time.Duration = 0
defaultShare float64 = 1.0
minusOne = ^uint64(0)
defaultMetricsCollectTimeout = 5 * time.Second
)
type ReleaseFunc scheduling.ReleaseFunc
type Limiter interface {
ReadRequest(context.Context) (ReleaseFunc, error)
WriteRequest(context.Context) (ReleaseFunc, error)
SetParentID(string)
SetMetrics(Metrics)
Close()
}
type scheduler interface {
RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
Close()
}
func NewLimiter(c *limits.Config) (Limiter, error) {
if err := validateConfig(c); err != nil {
return nil, err
}
readScheduler, err := createScheduler(c.Read())
if err != nil {
return nil, fmt.Errorf("create read scheduler: %w", err)
}
writeScheduler, err := createScheduler(c.Write())
if err != nil {
return nil, fmt.Errorf("create write scheduler: %w", err)
}
l := &mClockLimiter{
readScheduler: readScheduler,
writeScheduler: writeScheduler,
closeCh: make(chan struct{}),
wg: &sync.WaitGroup{},
readStats: createStats(),
writeStats: createStats(),
}
l.shardID.Store(&shardID{})
l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
l.startMetricsCollect()
return l, nil
}
func createScheduler(config limits.OpConfig) (scheduler, error) {
if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit {
return newSemaphoreScheduler(config.MaxRunningOps), nil
}
return scheduling.NewMClock(
uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
converToSchedulingTags(config.Tags), config.IdleTimeout)
}
func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
result := make(map[string]scheduling.TagInfo)
for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
result[tag.String()] = scheduling.TagInfo{
Share: defaultShare,
}
}
for _, l := range limits {
v := result[l.Tag]
if l.Weight != nil && *l.Weight != 0 {
v.Share = *l.Weight
}
if l.LimitOps != nil && *l.LimitOps != 0 {
v.LimitIOPS = l.LimitOps
}
if l.ReservedOps != nil && *l.ReservedOps != 0 {
v.ReservedIOPS = l.ReservedOps
}
result[l.Tag] = v
}
return result
}
var (
_ Limiter = (*noopLimiter)(nil)
releaseStub ReleaseFunc = func() {}
noopLimiterInstance = &noopLimiter{}
)
func NewNoopLimiter() Limiter {
return noopLimiterInstance
}
type noopLimiter struct{}
func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
return releaseStub, nil
}
func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
return releaseStub, nil
}
func (n *noopLimiter) SetParentID(string) {}
func (n *noopLimiter) Close() {}
func (n *noopLimiter) SetMetrics(Metrics) {}
var _ Limiter = (*mClockLimiter)(nil)
type shardID struct {
id string
}
type mClockLimiter struct {
readScheduler scheduler
writeScheduler scheduler
readStats map[string]*stat
writeStats map[string]*stat
shardID atomic.Pointer[shardID]
metrics atomic.Pointer[metricsHolder]
closeCh chan struct{}
wg *sync.WaitGroup
}
func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
return requestArrival(ctx, n.readScheduler, n.readStats)
}
func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
return requestArrival(ctx, n.writeScheduler, n.writeStats)
}
func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
tag, ok := tagging.IOTagFromContext(ctx)
if !ok {
tag = IOTagClient.String()
}
stat := getStat(tag, stats)
stat.pending.Add(1)
if tag == IOTagCritical.String() {
stat.inProgress.Add(1)
return func() {
stat.completed.Add(1)
}, nil
}
rel, err := s.RequestArrival(ctx, tag)
stat.inProgress.Add(1)
if err != nil {
if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
errors.Is(err, errSemaphoreLimitExceeded) {
stat.resourceExhausted.Add(1)
return nil, &apistatus.ResourceExhausted{}
}
stat.completed.Add(1)
return nil, err
}
return func() {
rel()
stat.completed.Add(1)
}, nil
}
func (n *mClockLimiter) Close() {
n.readScheduler.Close()
n.writeScheduler.Close()
close(n.closeCh)
n.wg.Wait()
n.metrics.Load().metrics.Close(n.shardID.Load().id)
}
func (n *mClockLimiter) SetParentID(parentID string) {
n.shardID.Store(&shardID{id: parentID})
}
func (n *mClockLimiter) SetMetrics(m Metrics) {
n.metrics.Store(&metricsHolder{metrics: m})
}
func (n *mClockLimiter) startMetricsCollect() {
n.wg.Add(1)
go func() {
defer n.wg.Done()
ticker := time.NewTicker(defaultMetricsCollectTimeout)
defer ticker.Stop()
for {
select {
case <-n.closeCh:
return
case <-ticker.C:
shardID := n.shardID.Load().id
if shardID == "" {
continue
}
metrics := n.metrics.Load().metrics
exportMetrics(metrics, n.readStats, shardID, "read")
exportMetrics(metrics, n.writeStats, shardID, "write")
}
}
}()
}
func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
var pending uint64
var inProgress uint64
var completed uint64
var resExh uint64
for tag, s := range stats {
pending = s.pending.Load()
inProgress = s.inProgress.Load()
completed = s.completed.Load()
resExh = s.resourceExhausted.Load()
if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
continue
}
metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
}
}

View file

@ -1,31 +0,0 @@
package qos
import "sync/atomic"
type Metrics interface {
SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
Close(shardID string)
}
var _ Metrics = (*noopMetrics)(nil)
type noopMetrics struct{}
func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
}
func (n *noopMetrics) Close(string) {}
// stat presents limiter statistics cumulative counters.
//
// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
type stat struct {
completed atomic.Uint64
pending atomic.Uint64
resourceExhausted atomic.Uint64
inProgress atomic.Uint64
}
type metricsHolder struct {
metrics Metrics
}

View file

@ -1,39 +0,0 @@
package qos
import (
"context"
"errors"
qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
)
var (
_ scheduler = (*semaphore)(nil)
errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
)
type semaphore struct {
s *qosSemaphore.Semaphore
}
func newSemaphoreScheduler(size int64) *semaphore {
return &semaphore{
s: qosSemaphore.NewSemaphore(size),
}
}
func (s *semaphore) Close() {}
func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if s.s.Acquire() {
return s.s.Release, nil
}
return nil, errSemaphoreLimitExceeded
}

View file

@ -1,29 +0,0 @@
package qos
const unknownStatsTag = "unknown"
var statTags = map[string]struct{}{
IOTagBackground.String(): {},
IOTagClient.String(): {},
IOTagCritical.String(): {},
IOTagInternal.String(): {},
IOTagPolicer.String(): {},
IOTagTreeSync.String(): {},
IOTagWritecache.String(): {},
unknownStatsTag: {},
}
func createStats() map[string]*stat {
result := make(map[string]*stat)
for tag := range statTags {
result[tag] = &stat{}
}
return result
}
func getStat(tag string, stats map[string]*stat) *stat {
if v, ok := stats[tag]; ok {
return v
}
return stats[unknownStatsTag]
}

View file

@ -1,42 +1,34 @@
package qos
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)
import "fmt"
type IOTag string
const (
IOTagBackground IOTag = "background"
IOTagClient IOTag = "client"
IOTagCritical IOTag = "critical"
IOTagInternal IOTag = "internal"
IOTagPolicer IOTag = "policer"
IOTagTreeSync IOTag = "treesync"
IOTagBackground IOTag = "background"
IOTagWritecache IOTag = "writecache"
IOTagPolicer IOTag = "policer"
IOTagCritical IOTag = "critical"
ioTagUnknown IOTag = ""
)
func FromRawString(s string) (IOTag, error) {
switch s {
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagCritical):
return IOTagCritical, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagInternal):
return IOTagInternal, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
case string(IOTagTreeSync):
return IOTagTreeSync, nil
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagWritecache):
return IOTagWritecache, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
default:
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
}
@ -45,15 +37,3 @@ func FromRawString(s string) (IOTag, error) {
func (t IOTag) String() string {
return string(t)
}
func IOTagFromContext(ctx context.Context) string {
tag, ok := tagging.IOTagFromContext(ctx)
if !ok {
tag = "undefined"
}
return tag
}
func (t IOTag) IsLocal() bool {
return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
}

View file

@ -1,93 +0,0 @@
package qos
import (
"errors"
"fmt"
"math"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
)
var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
type tagConfig struct {
Shares, Limit, Reserved *float64
}
func validateConfig(c *limits.Config) error {
if err := validateOpConfig(c.Read()); err != nil {
return fmt.Errorf("limits 'read' section validation error: %w", err)
}
if err := validateOpConfig(c.Write()); err != nil {
return fmt.Errorf("limits 'write' section validation error: %w", err)
}
return nil
}
func validateOpConfig(c limits.OpConfig) error {
if c.MaxRunningOps <= 0 {
return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
}
if c.MaxWaitingOps <= 0 {
return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
}
if c.IdleTimeout <= 0 {
return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
}
if err := validateTags(c.Tags); err != nil {
return fmt.Errorf("'tags' config section validation error: %w", err)
}
return nil
}
func validateTags(configTags []limits.IOTagConfig) error {
tags := map[IOTag]tagConfig{
IOTagBackground: {},
IOTagClient: {},
IOTagInternal: {},
IOTagPolicer: {},
IOTagTreeSync: {},
IOTagWritecache: {},
}
for _, t := range configTags {
tag, err := FromRawString(t.Tag)
if err != nil {
return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
}
if _, ok := tags[tag]; !ok {
return fmt.Errorf("tag %s is not configurable", t.Tag)
}
tags[tag] = tagConfig{
Shares: t.Weight,
Limit: t.LimitOps,
Reserved: t.ReservedOps,
}
}
idx := 0
var shares float64
for t, v := range tags {
if idx == 0 {
idx++
shares = float64Value(v.Shares)
} else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
return errWeightsMustBeSpecified
}
if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String())
}
if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String())
}
if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String())
}
}
return nil
}
func float64Value(f *float64) float64 {
if f == nil {
return 0.0
}
return *f
}

View file

@ -1,6 +1,9 @@
package object
import (
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@ -21,3 +24,14 @@ func AddressOf(obj *objectSDK.Object) oid.Address {
return addr
}
// ExpirationEpoch returns the expiration epoch of the object.
func ExpirationEpoch(obj *objectSDK.Object) (epoch uint64, found bool) {
for _, attr := range obj.Attributes() {
if attr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(attr.Value(), 10, 64)
return epoch, err == nil
}
}
return
}

View file

@ -0,0 +1,49 @@
package object_test
import (
"strconv"
"testing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
)
func TestExpirationEpoch(t *testing.T) {
obj := objecttest.Object()
var expEpoch uint64 = 42
expAttr := objectSDK.NewAttribute()
expAttr.SetKey(objectV2.SysAttributeExpEpoch)
expAttr.SetValue(strconv.FormatUint(expEpoch, 10))
t.Run("no attributes set", func(t *testing.T) {
obj.SetAttributes()
_, found := objectCore.ExpirationEpoch(obj)
require.False(t, found)
})
t.Run("no expiration epoch attribute", func(t *testing.T) {
obj.SetAttributes(*objecttest.Attribute(), *objectSDK.NewAttribute())
_, found := objectCore.ExpirationEpoch(obj)
require.False(t, found)
})
t.Run("valid expiration epoch attribute", func(t *testing.T) {
obj.SetAttributes(*objecttest.Attribute(), *expAttr, *objectSDK.NewAttribute())
epoch, found := objectCore.ExpirationEpoch(obj)
require.True(t, found)
require.Equal(t, expEpoch, epoch)
})
t.Run("invalid expiration epoch value", func(t *testing.T) {
expAttr.SetValue("-42")
obj.SetAttributes(*objecttest.Attribute(), *expAttr, *objectSDK.NewAttribute())
_, found := objectCore.ExpirationEpoch(obj)
require.False(t, found)
expAttr.SetValue("qwerty")
obj.SetAttributes(*objecttest.Attribute(), *expAttr, *objectSDK.NewAttribute())
_, found = objectCore.ExpirationEpoch(obj)
require.False(t, found)
})
}

Some files were not shown because too many files have changed in this diff.