Compare commits


No commits in common. "master" and "feat/shard-info-for-object" have entirely different histories.

156 changed files with 3803 additions and 2292 deletions

.ci/Jenkinsfile (vendored)
View file

@@ -1,87 +0,0 @@
def golang = ['1.23', '1.24']
def golangDefault = "golang:${golang.last()}"

async {

    for (version in golang) {
        def go = version

        task("test/go${go}") {
            container("golang:${go}") {
                sh 'make test'
            }
        }

        task("build/go${go}") {
            container("golang:${go}") {
                for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
                    sh """
                        make bin/frostfs-${app}
                        bin/frostfs-${app} --version
                    """
                }
            }
        }
    }

    task('test/race') {
        container(golangDefault) {
            sh 'make test GOFLAGS="-count=1 -race"'
        }
    }

    task('lint') {
        container(golangDefault) {
            sh 'make lint-install lint'
        }
    }

    task('staticcheck') {
        container(golangDefault) {
            sh 'make staticcheck-install staticcheck-run'
        }
    }

    task('gopls') {
        container(golangDefault) {
            sh 'make gopls-install gopls-run'
        }
    }

    task('gofumpt') {
        container(golangDefault) {
            sh '''
                make fumpt-install
                make fumpt
                git diff --exit-code --quiet
            '''
        }
    }

    task('vulncheck') {
        container(golangDefault) {
            sh '''
                go install golang.org/x/vuln/cmd/govulncheck@latest
                govulncheck ./...
            '''
        }
    }

    task('pre-commit') {
        dockerfile("""
            FROM ${golangDefault}
            RUN apt update && \
                apt install -y --no-install-recommends pre-commit
            """) {
            withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
                sh 'pre-commit run --color=always --hook-stage=manual --all-files'
            }
        }
    }

    task('dco') {
        container('git.frostfs.info/truecloudlab/commit-check:master') {
            sh 'FROM=pull_request_target commit-check'
        }
    }
}

View file

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
     steps:
       - uses: actions/checkout@v3

View file

@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.22'
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

View file

@@ -21,7 +21,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.24
+          go-version: 1.23
      - name: Set up Python
        run: |
          apt update

View file

@@ -16,7 +16,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           cache: true
       - name: Install linters
@@ -30,7 +30,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -53,7 +53,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           cache: true
       - name: Run tests
@@ -68,7 +68,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           cache: true
       - name: Install staticcheck
@@ -104,7 +104,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           cache: true
       - name: Install gofumpt

View file

@@ -18,7 +18,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           check-latest: true
       - name: Install govulncheck

View file

@@ -22,11 +22,6 @@ linters-settings:
       # 'default' case is present, even if all enum members aren't listed in the
       # switch
       default-signifies-exhaustive: true
-  gci:
-    sections:
-      - standard
-      - default
-    custom-order: true
   govet:
     # report about shadowed variables
     check-shadowing: false
@@ -77,7 +72,6 @@ linters:
     - durationcheck
     - exhaustive
     - copyloopvar
-    - gci
     - gofmt
     - goimports
     - misspell

View file

@@ -1,6 +1,5 @@
 #!/usr/bin/make -f
 SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -8,7 +7,7 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
-GO_VERSION ?= 1.23
+GO_VERSION ?= 1.22
 LINT_VERSION ?= 1.62.2
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
 PROTOC_VERSION ?= 25.0
@@ -17,7 +16,7 @@ PROTOC_OS_VERSION=osx-x86_64
 ifeq ($(shell uname), Linux)
   PROTOC_OS_VERSION=linux-x86_64
 endif
-STATICCHECK_VERSION ?= 2025.1.1
+STATICCHECK_VERSION ?= 2024.1.1
 ARCH = amd64
 BIN = bin
@@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0
 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
 GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
-GOPLS_VERSION ?= v0.17.1
+GOPLS_VERSION ?= v0.15.1
 GOPLS_DIR ?= $(abspath $(BIN))/gopls
 GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
 GOPLS_TEMP_FILE := $(shell mktemp)
@@ -116,7 +115,7 @@ protoc:
 # Install protoc
 protoc-install:
 	@rm -rf $(PROTOBUF_DIR)
-	@mkdir -p $(PROTOBUF_DIR)
+	@mkdir $(PROTOBUF_DIR)
 	@echo "⇒ Installing protoc... "
 	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
 	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@@ -170,7 +169,7 @@ imports:
 # Install gofumpt
 fumpt-install:
 	@rm -rf $(GOFUMPT_DIR)
-	@mkdir -p $(GOFUMPT_DIR)
+	@mkdir $(GOFUMPT_DIR)
 	@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
 # Run gofumpt
@@ -187,37 +186,14 @@ test:
 	@echo "⇒ Running go test"
 	@GOFLAGS="$(GOFLAGS)" go test ./...
-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
-	@git config remote.review.url \
-		|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
-	@mkdir -p $(GIT_HOOK_DIR)/
-	@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/commit-msg
-	@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
-	@git push review HEAD:refs/for/$(BRANCH) \
-		--push-option r=e.stratonikov@yadro.com \
-		--push-option r=d.stepanov@yadro.com \
-		--push-option r=an.nikiforov@yadro.com \
-		--push-option r=a.arifullin@yadro.com \
-		--push-option r=ekaterina.lebedeva@yadro.com \
-		--push-option r=a.savchuk@yadro.com \
-		--push-option r=a.chuprov@yadro.com
 # Run pre-commit
 pre-commit-run:
 	@pre-commit run -a --hook-stage manual
 # Install linters
-lint-install: $(BIN)
+lint-install:
 	@rm -rf $(OUTPUT_LINT_DIR)
-	@mkdir -p $(OUTPUT_LINT_DIR)
+	@mkdir $(OUTPUT_LINT_DIR)
 	@mkdir -p $(TMP_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@ -236,7 +212,7 @@ lint:
 # Install staticcheck
 staticcheck-install:
 	@rm -rf $(STATICCHECK_DIR)
-	@mkdir -p $(STATICCHECK_DIR)
+	@mkdir $(STATICCHECK_DIR)
 	@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
 # Run staticcheck
@@ -249,7 +225,7 @@ staticcheck-run:
 # Install gopls
 gopls-install:
 	@rm -rf $(GOPLS_DIR)
-	@mkdir -p $(GOPLS_DIR)
+	@mkdir $(GOPLS_DIR)
 	@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
 # Run gopls

View file

@@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
 			nbuf := make([]byte, 8)
 			copy(nbuf[:], v)
 			n := binary.LittleEndian.Uint64(nbuf)
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
+			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
 		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
 			if len(v) == 0 || len(v) > 1 {
 				return helper.InvalidConfigValueErr(k)
 			}
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
+			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
 		default:
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
+			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
 		}
 	}
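Note: the only functional difference in this hunk is the formatting helper. fmt.Appendf (Go 1.19+) on the master side formats directly into a byte slice, so it skips the string allocation plus []byte copy that the fmt.Sprintf form pays for on every row. A minimal runnable sketch of the two equivalent calls; the key and value are made up for illustration:

    package main

    import (
        "fmt"
        "os"
        "text/tabwriter"
    )

    func main() {
        tw := tabwriter.NewWriter(os.Stdout, 0, 2, 2, ' ', 0)

        // Older form: format to a string, then copy it into a fresh []byte.
        _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", "MaxObjectSize", 67108864)))

        // fmt.Appendf form: format directly into a (here nil) byte slice,
        // avoiding the intermediate string.
        _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", "MaxObjectSize", 67108864))

        _ = tw.Flush()
    }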

View file

@@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
 		if info.version == "" {
 			info.version = "unknown"
 		}
-		_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
-			info.name, info.version, info.hash.StringLE()))
+		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
+			info.name, info.version, info.hash.StringLE())))
 	}
 	_ = tw.Flush()

View file

@@ -34,7 +34,7 @@ const (
 	subjectNameFlag    = "subject-name"
 	subjectKeyFlag     = "subject-key"
 	subjectAddressFlag = "subject-address"
-	extendedFlag       = "extended"
+	includeNamesFlag   = "include-names"
 	groupNameFlag      = "group-name"
 	groupIDFlag        = "group-id"
@@ -209,7 +209,7 @@ func initFrostfsIDListSubjectsCmd() {
 	Cmd.AddCommand(frostfsidListSubjectsCmd)
 	frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
-	frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+	frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }

 func initFrostfsIDCreateGroupCmd() {
@@ -256,7 +256,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
 	frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
-	frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }

 func initFrostfsIDSetKVCmd() {
@@ -336,7 +336,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
 }

 func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
-	extended, _ := cmd.Flags().GetBool(extendedFlag)
+	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
 	ns := getFrostfsIDNamespace(cmd)
 	inv, _, hash := initInvoker(cmd)
 	reader := frostfsidrpclient.NewReader(inv, hash)
@@ -349,19 +349,21 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })

 	for _, addr := range subAddresses {
-		if !extended {
+		if !includeNames {
 			cmd.Println(address.Uint160ToString(addr))
 			continue
 		}

-		items, err := reader.GetSubject(addr)
+		sessionID, it, err := reader.ListSubjects()
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)

+		items, err := readIterator(inv, &it, sessionID)
+		commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
+
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)

-		printSubjectInfo(cmd, addr, subj)
-		cmd.Println()
+		cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
 	}
 }
@@ -481,7 +483,7 @@ func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
 func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	ns := getFrostfsIDNamespace(cmd)
 	groupID := getFrostfsIDGroupID(cmd)
-	extended, _ := cmd.Flags().GetBool(extendedFlag)
+	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
 	inv, cs, hash := initInvoker(cmd)
 	_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
 	commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@@ -499,7 +501,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })

 	for _, subjAddr := range subjects {
-		if !extended {
+		if !includeNames {
 			cmd.Println(address.Uint160ToString(subjAddr))
 			continue
 		}
@@ -508,8 +510,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)

 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)

-		printSubjectInfo(cmd, subjAddr, subj)
-		cmd.Println()
+		cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
 	}
 }
@@ -599,30 +600,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
 	return inv, cs, nmHash
 }
-
-func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
-	cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
-	pk := "<nil>"
-	if subj.PrimaryKey != nil {
-		pk = subj.PrimaryKey.String()
-	}
-	cmd.Printf("Primary key: %s\n", pk)
-	cmd.Printf("Name: %s\n", subj.Name)
-	cmd.Printf("Namespace: %s\n", subj.Namespace)
-
-	if len(subj.AdditionalKeys) > 0 {
-		cmd.Printf("Additional keys:\n")
-		for _, key := range subj.AdditionalKeys {
-			k := "<nil>"
-			if key != nil {
-				k = key.String()
-			}
-			cmd.Printf("- %s\n", k)
-		}
-	}
-
-	if len(subj.KV) > 0 {
-		cmd.Printf("KV:\n")
-		for k, v := range subj.KV {
-			cmd.Printf("- %s: %s\n", k, v)
-		}
-	}
-}

View file

@@ -6,7 +6,6 @@ import (
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
-	nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -14,7 +13,9 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+	nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
 	"github.com/nspcc-dev/neo-go/pkg/util"
@@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*keys.PublicKey, error) {
 }

 func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
-	inv := invoker.New(c, nil)
-	reader := nns2.NewReader(inv, nnsHash)
-	return reader.IsAvailable(name)
+	switch c.(type) {
+	case *rpcclient.Client:
+		inv := invoker.New(c, nil)
+		reader := nns2.NewReader(inv, nnsHash)
+		return reader.IsAvailable(name)
+	default:
+		b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
+		if err != nil {
+			return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
+		}
+		return b, nil
+	}
 }

 func CheckNotaryEnabled(c Client) error {

View file

@@ -40,8 +40,6 @@ type ClientContext struct {
 	CommitteeAct    *actor.Actor     // committee actor with the Global witness scope
 	ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer
 	SentTxs         []HashVUBPair
-
-	AwaitDisabled bool
 }

 func NewRemoteClient(v *viper.Viper) (Client, error) {
@@ -122,7 +120,7 @@ func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command,
 }

 func (c *ClientContext) AwaitTx(cmd *cobra.Command) error {
-	if len(c.SentTxs) == 0 || c.AwaitDisabled {
+	if len(c.SentTxs) == 0 {
 		return nil
 	}

View file

@@ -3,7 +3,6 @@ package helper
 import (
 	"errors"
 	"fmt"
-	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
 		return err
 	}
 	for k, v := range m {
-		if slices.Contains(NetmapConfigKeys, k) {
-			md[k] = v
+		for _, key := range NetmapConfigKeys {
+			if k == key {
+				md[k] = v
+				break
+			}
 		}
 	}
 	return nil
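The master side of this hunk leans on slices.Contains from the standard library (Go 1.21+), which collapses the feature branch's hand-written membership loop into one call. A self-contained sketch, with an illustrative stand-in for the package-level NetmapConfigKeys list:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        // Illustrative stand-in for NetmapConfigKeys; the real list lives
        // in the morph helper package.
        netmapConfigKeys := []string{"MaxObjectSize", "EpochDuration"}

        m := map[string]any{"MaxObjectSize": 67108864, "SomeOtherKey": 1}
        md := make(map[string]any)

        for k, v := range m {
            // One call replaces the inner for/if/break loop.
            if slices.Contains(netmapConfigKeys, k) {
                md[k] = v
            }
        }
        fmt.Println(md) // map[MaxObjectSize:67108864]
    }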

View file

@@ -39,7 +39,6 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
 		return err
 	}

-	initCtx.AwaitDisabled = true
 	cmd.Println("Stage 4.1: Transfer GAS to proxy contract.")
 	if err := transferGASToProxy(initCtx); err != nil {
 		return err
@@ -56,10 +55,5 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
 	}

 	cmd.Println("Stage 7: set addresses in NNS.")
-	if err := setNNS(initCtx); err != nil {
-		return err
-	}
-
-	initCtx.AwaitDisabled = false
-	return initCtx.AwaitTx()
+	return setNNS(initCtx)
 }

View file

@@ -1,6 +1,7 @@
 package initialize

 import (
+	"errors"
 	"fmt"
 	"math/big"
@@ -10,8 +11,11 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
 	"github.com/nspcc-dev/neo-go/pkg/io"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
 	"github.com/nspcc-dev/neo-go/pkg/util"
@@ -26,8 +30,7 @@ const (
 )

 func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
-	reader := neo.NewReader(c.ReadOnlyInvoker)
-	regPrice, err := reader.GetRegisterPrice()
+	regPrice, err := getCandidateRegisterPrice(c)
 	if err != nil {
 		return fmt.Errorf("can't fetch registration price: %w", err)
 	}
@@ -113,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
 func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
 	neoHash := neo.Hash

-	ok, err := transferNEOFinished(c)
+	ok, err := transferNEOFinished(c, neoHash)
 	if ok || err != nil {
 		return err
 	}
@@ -136,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
 	return c.AwaitTx()
 }

-func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
-	r := neo.NewReader(c.ReadOnlyInvoker)
+func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
+	r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
 	bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
 	return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
 }
+
+var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
+
+func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
+	switch c.Client.(type) {
+	case *rpcclient.Client:
+		inv := invoker.New(c.Client, nil)
+		reader := neo.NewReader(inv)
+		return reader.GetRegisterPrice()
+	default:
+		neoHash := neo.Hash
+		res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
+		if err != nil {
+			return 0, err
+		}
+		if len(res.Stack) == 0 {
+			return 0, errGetPriceInvalid
+		}
+		bi, err := res.Stack[0].TryInteger()
+		if err != nil || !bi.IsInt64() {
+			return 0, errGetPriceInvalid
+		}
+		return bi.Int64(), nil
+	}
+}
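This hunk and the NNSIsAvailable hunk above share one shape on the feature side: type-switch on the concrete client, take the high-level neo-go reader path when the client is a full *rpcclient.Client, and fall back to a raw contract invocation with manual result validation otherwise. A stripped-down sketch of that dispatch; the Client interface and both concrete types here are illustrative stand-ins, not the real morph helper types:

    package main

    import (
        "errors"
        "fmt"
    )

    // Client mimics the helper's client abstraction; the concrete type
    // tested for in the diff is *rpcclient.Client from neo-go.
    type Client interface {
        call(method string) (int64, error)
    }

    type remoteClient struct{} // full RPC client: typed readers are available
    type localClient struct{}  // restricted client: raw invocations only

    func (remoteClient) call(string) (int64, error) { return 100_000_000, nil }
    func (localClient) call(string) (int64, error)  { return 100_000_000, nil }

    var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")

    func registerPrice(c Client) (int64, error) {
        switch c.(type) {
        case remoteClient:
            // High-level path: neo.NewReader(inv).GetRegisterPrice() in the diff.
            return c.call("getRegisterPrice")
        default:
            // Fallback path: raw InvokeFunction plus manual validation.
            v, err := c.call("getRegisterPrice")
            if err != nil {
                return 0, errGetPriceInvalid
            }
            return v, nil
        }
    }

    func main() {
        p, err := registerPrice(localClient{})
        fmt.Println(p, err) // 100000000 <nil>
    }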

View file

@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
 	buf := bytes.NewBuffer(nil)
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

-	_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
-	_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
-	_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))

 	_ = tw.Flush()
 	cmd.Print(buf.String())

View file

@@ -7,6 +7,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
 	utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@@ -40,6 +41,7 @@ func init() {
 	rootCmd.AddCommand(config.RootCmd)
 	rootCmd.AddCommand(morph.RootCmd)
+	rootCmd.AddCommand(storagecfg.RootCmd)
 	rootCmd.AddCommand(metabase.RootCmd)
 	rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))

View file

@@ -0,0 +1,137 @@
package storagecfg

const configTemplate = `logger:
  level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"

node:
  wallet:
    path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
    address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
    password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
  addresses: # list of addresses announced by Storage node in the Network map
    - {{ .AnnouncedAddress }}
  attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
  relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map

grpc:
  num: 1 # total number of listener endpoints
  0:
    endpoint: {{ .Endpoint }} # endpoint for gRPC server
    tls:{{if .TLSCert}}
      enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
      certificate: {{ .TLSCert }} # path to TLS certificate
      key: {{ .TLSKey }} # path to TLS key
    {{- else }}
      enabled: false # disable TLS for a gRPC connection
    {{- end}}

control:
  authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
    {{- range .AuthorizedKeys }}
    - {{.}}{{end}}
  grpc:
    endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service

morph:
  dial_timeout: 20s # timeout for side chain NEO RPC client connection
  cache_ttl: 15s # use TTL cache for side chain GET operations
  rpc_endpoint: # side chain N3 RPC endpoints
    {{- range .MorphRPC }}
    - address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
  shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
  shard:
    default: # section with the default shard parameters
      metabase:
        perm: 0644 # permissions for metabase files(directories: +x for current user and group)
      blobstor:
        perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
        depth: 2 # max depth of object tree storage in FS
        small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
        compress: true # turn on/off Zstandard compression (level 3) of stored objects
        compression_exclude_content_types:
          - audio/*
          - video/*
        blobovnicza:
          size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
          depth: 1 # max depth of object tree storage in key-value DB
          width: 4 # max width of object tree storage in key-value DB
          opened_cache_capacity: 50 # maximum number of opened database files
          opened_cache_ttl: 5m # ttl for opened database file
          opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
      gc:
        remover_batch_size: 200 # number of objects to be removed by the garbage collector
        remover_sleep_interval: 5m # frequency of the garbage collector invocation
    0:
      mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
      metabase:
        path: {{ .MetabasePath }} # path to the metabase
      blobstor:
        path: {{ .BlobstorPath }} # path to the blobstor
{{end}}`

const (
	neofsMainnetAddress   = "2cafa46838e8b564468ebd868dcafdd99dce6221"
	balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
	neofsTestnetAddress   = "b65d8243ac63983206d17e5221af0653a7266fa1"
	balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
)

var n3config = map[string]struct {
	MorphRPC        []string
	RPC             []string
	NeoFSContract   string
	BalanceContract string
}{
	"testnet": {
		MorphRPC: []string{
			"rpc01.morph.testnet.fs.neo.org:51331",
			"rpc02.morph.testnet.fs.neo.org:51331",
			"rpc03.morph.testnet.fs.neo.org:51331",
			"rpc04.morph.testnet.fs.neo.org:51331",
			"rpc05.morph.testnet.fs.neo.org:51331",
			"rpc06.morph.testnet.fs.neo.org:51331",
			"rpc07.morph.testnet.fs.neo.org:51331",
		},
		RPC: []string{
			"rpc01.testnet.n3.nspcc.ru:21331",
			"rpc02.testnet.n3.nspcc.ru:21331",
			"rpc03.testnet.n3.nspcc.ru:21331",
			"rpc04.testnet.n3.nspcc.ru:21331",
			"rpc05.testnet.n3.nspcc.ru:21331",
			"rpc06.testnet.n3.nspcc.ru:21331",
			"rpc07.testnet.n3.nspcc.ru:21331",
		},
		NeoFSContract:   neofsTestnetAddress,
		BalanceContract: balanceTestnetAddress,
	},
	"mainnet": {
		MorphRPC: []string{
			"rpc1.morph.fs.neo.org:40341",
			"rpc2.morph.fs.neo.org:40341",
			"rpc3.morph.fs.neo.org:40341",
			"rpc4.morph.fs.neo.org:40341",
			"rpc5.morph.fs.neo.org:40341",
			"rpc6.morph.fs.neo.org:40341",
			"rpc7.morph.fs.neo.org:40341",
		},
		RPC: []string{
			"rpc1.n3.nspcc.ru:10331",
			"rpc2.n3.nspcc.ru:10331",
			"rpc3.n3.nspcc.ru:10331",
			"rpc4.n3.nspcc.ru:10331",
			"rpc5.n3.nspcc.ru:10331",
			"rpc6.n3.nspcc.ru:10331",
			"rpc7.n3.nspcc.ru:10331",
		},
		NeoFSContract:   neofsMainnetAddress,
		BalanceContract: balanceMainnetAddress,
	},
}

View file

@@ -0,0 +1,433 @@
package storagecfg

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"text/template"
	"time"

	netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
	"github.com/chzyer/readline"
	"github.com/nspcc-dev/neo-go/cli/flags"
	"github.com/nspcc-dev/neo-go/cli/input"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/spf13/cobra"
)

const (
	walletFlag  = "wallet"
	accountFlag = "account"
)

const (
	defaultControlEndpoint = "localhost:8090"
	defaultDataEndpoint    = "localhost"
)

// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
	Use:   "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
	Short: "Section for storage node configuration commands",
	Run:   storageConfig,
}

func init() {
	fs := RootCmd.Flags()

	fs.StringP(walletFlag, "w", "", "Path to wallet")
	fs.StringP(accountFlag, "a", "", "Wallet account")
}

type config struct {
	AnnouncedAddress string
	AuthorizedKeys   []string
	ControlEndpoint  string
	Endpoint         string
	TLSCert          string
	TLSKey           string
	MorphRPC         []string
	Attribute        struct {
		Locode string
	}
	Wallet struct {
		Path     string
		Account  string
		Password string
	}
	Relay        bool
	BlobstorPath string
	MetabasePath string
}

func storageConfig(cmd *cobra.Command, args []string) {
	outPath := getOutputPath(args)

	historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
	readline.SetHistoryPath(historyPath)

	var c config

	c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
	if c.Wallet.Path == "" {
		c.Wallet.Path = getPath("Path to the storage node wallet: ")
	}
	w, err := wallet.NewWalletFromFile(c.Wallet.Path)
	fatalOnErr(err)

	fillWalletAccount(cmd, &c, w)

	accH, err := flags.ParseAddress(c.Wallet.Account)
	fatalOnErr(err)

	acc := w.GetAccount(accH)
	if acc == nil {
		fatalOnErr(errors.New("can't find account in wallet"))
	}

	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
	fatalOnErr(err)

	err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
	fatalOnErr(err)

	c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))

	network := readNetwork(cmd)

	c.MorphRPC = n3config[network].MorphRPC

	depositGas(cmd, acc, network)

	c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")

	endpoint := getDefaultEndpoint(cmd, &c)
	c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
	if c.Endpoint == "" {
		c.Endpoint = endpoint
	}

	c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
	if c.ControlEndpoint == "" {
		c.ControlEndpoint = defaultControlEndpoint
	}

	c.TLSCert = getPath("TLS Certificate (optional): ")
	if c.TLSCert != "" {
		c.TLSKey = getPath("TLS Key: ")
	}

	c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
	if !c.Relay {
		p := getPath("Path to the storage directory (all available storage will be used): ")
		c.BlobstorPath = filepath.Join(p, "blob")
		c.MetabasePath = filepath.Join(p, "meta")
	}

	out := applyTemplate(c)
	fatalOnErr(os.WriteFile(outPath, out, 0o644))

	cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}

func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
	var addr, port string
	for {
		c.AnnouncedAddress = getString("Publicly announced address: ")
		validator := netutil.Address{}
		err := validator.FromString(c.AnnouncedAddress)
		if err != nil {
			cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
			continue
		}
		uriAddr, err := url.Parse(validator.URIAddr())
		if err != nil {
			panic(fmt.Errorf("unexpected error: %w", err))
		}
		addr = uriAddr.Hostname()
		port = uriAddr.Port()
		ip, err := net.ResolveIPAddr("ip", addr)
		if err != nil {
			cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
			continue
		}

		if !ip.IP.IsGlobalUnicast() {
			cmd.Println("IP must be global unicast.")
			continue
		}
		cmd.Printf("Resolved IP address: %s\n", ip.String())

		_, err = strconv.ParseUint(port, 10, 16)
		if err != nil {
			cmd.Println("Port must be an integer.")
			continue
		}

		break
	}
	return net.JoinHostPort(defaultDataEndpoint, port)
}

func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
	c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
	if c.Wallet.Account == "" {
		addr := address.Uint160ToString(w.GetChangeAddress())
		c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
		if c.Wallet.Account == "" {
			c.Wallet.Account = addr
		}
	}
}

func readNetwork(cmd *cobra.Command) string {
	var network string
	for {
		network = getString("Choose network [mainnet]/testnet: ")
		switch network {
		case "":
			network = "mainnet"
		case "testnet", "mainnet":
		default:
			cmd.Println(`Network must be either "mainnet" or "testnet"`)
			continue
		}
		break
	}
	return network
}

func getOutputPath(args []string) string {
	if len(args) != 0 {
		return args[0]
	}
	outPath := getPath("File to write config at [./config.yml]: ")
	if outPath == "" {
		outPath = "./config.yml"
	}
	return outPath
}

func getWalletAccount(w *wallet.Wallet, prompt string) string {
	addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
	for i := range w.Accounts {
		addrs[i] = readline.PcItem(w.Accounts[i].Address)
	}

	readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
	defer readline.SetAutoComplete(nil)

	s, err := readline.Line(prompt)
	fatalOnErr(err)
	return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
}

func getString(prompt string) string {
	s, err := readline.Line(prompt)
	fatalOnErr(err)
	if s != "" {
		_ = readline.AddHistory(s)
	}
	return s
}

type filenameCompleter struct{}

func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
	prefix := string(line[:pos])
	dir := filepath.Dir(prefix)
	de, err := os.ReadDir(dir)
	if err != nil {
		return nil, 0
	}

	for i := range de {
		name := filepath.Join(dir, de[i].Name())
		if strings.HasPrefix(name, prefix) {
			tail := []rune(strings.TrimPrefix(name, prefix))
			if de[i].IsDir() {
				tail = append(tail, filepath.Separator)
			}
			newLine = append(newLine, tail)
		}
	}
	if pos != 0 {
		return newLine, pos - len([]rune(dir))
	}
	return newLine, 0
}

func getPath(prompt string) string {
	readline.SetAutoComplete(filenameCompleter{})
	defer readline.SetAutoComplete(nil)

	p, err := readline.Line(prompt)
	fatalOnErr(err)

	if p == "" {
		return p
	}

	_ = readline.AddHistory(p)

	abs, err := filepath.Abs(p)
	if err != nil {
		fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
	}

	return abs
}

func getConfirmation(def bool, prompt string) bool {
	for {
		s, err := readline.Line(prompt)
		fatalOnErr(err)

		switch strings.ToLower(s) {
		case "y", "yes":
			return true
		case "n", "no":
			return false
		default:
			if len(s) == 0 {
				return def
			}
		}
	}
}

func applyTemplate(c config) []byte {
	tmpl, err := template.New("config").Parse(configTemplate)
	fatalOnErr(err)

	b := bytes.NewBuffer(nil)
	fatalOnErr(tmpl.Execute(b, c))

	return b.Bytes()
}

func fatalOnErr(err error) {
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}

func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
	sideClient := initClient(n3config[network].MorphRPC)
	balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)

	sideActor, err := actor.NewSimple(sideClient, acc)
	if err != nil {
		fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
	}

	sideGas := nep17.NewReader(sideActor, balanceHash)
	accSH := acc.Contract.ScriptHash()

	balance, err := sideGas.BalanceOf(accSH)
	if err != nil {
		fatalOnErr(fmt.Errorf("side chain balance: %w", err))
	}

	ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
		fixedn.ToString(balance, 12)))
	if !ok {
		return
	}

	amountStr := getString("Enter amount in GAS: ")
	amount, err := fixedn.FromString(amountStr, 8)
	if err != nil {
		fatalOnErr(fmt.Errorf("invalid amount: %w", err))
	}

	mainClient := initClient(n3config[network].RPC)
	neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)

	mainActor, err := actor.NewSimple(mainClient, acc)
	if err != nil {
		fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
	}

	mainGas := nep17.New(mainActor, gas.Hash)

	txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
	if err != nil {
		fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
	}

	cmd.Print("Waiting for transactions to persist.")
	tick := time.NewTicker(time.Second / 2)
	defer tick.Stop()

	timer := time.NewTimer(time.Second * 20)
	defer timer.Stop()

	at := trigger.Application

loop:
	for {
		select {
		case <-tick.C:
			_, err := mainClient.GetApplicationLog(txHash, &at)
			if err == nil {
				cmd.Print("\n")
				break loop
			}
			cmd.Print(".")
		case <-timer.C:
			cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
			if getConfirmation(false, "Continue configuration? yes/[no]: ") {
				return
			}
			os.Exit(1)
		}
	}
}

func initClient(rpc []string) *rpcclient.Client {
	var c *rpcclient.Client
	var err error

	shuffled := slices.Clone(rpc)
	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })

	for _, endpoint := range shuffled {
		c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
			DialTimeout:    time.Second * 2,
			RequestTimeout: time.Second * 5,
		})
		if err != nil {
			continue
		}
		if err = c.Init(); err != nil {
			continue
		}
		return c
	}

	fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
	panic("unreachable")
}

View file

@@ -858,8 +858,6 @@ type PatchObjectPrm struct {
 	ReplaceAttribute bool

-	NewSplitHeader *objectSDK.SplitHeader
-
 	PayloadPatches []PayloadPatch
 }
@@ -890,11 +888,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
 		return nil, fmt.Errorf("init payload reading: %w", err)
 	}

-	if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
-		NewSplitHeader:    prm.NewSplitHeader,
-		NewAttributes:     prm.NewAttributes,
-		ReplaceAttributes: prm.ReplaceAttribute,
-	}) {
+	if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
 		for _, pp := range prm.PayloadPatches {
 			payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
 			if err != nil {

View file

@@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
 	prmDial := client.PrmDial{
 		Endpoint: addr.URIAddr(),
 		GRPCDialOptions: []grpc.DialOption{
-			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
+			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
 			grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
 			grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
 		},

View file

@@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
 	_, _ = tw.Write([]byte("#\tName\tType\n"))
 	for i, t := range targets {
-		_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
+		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
 	}
 	_ = tw.Flush()
 	cmd.Print(buf.String())

View file

@@ -11,6 +11,7 @@ import (
 	rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+
 	"github.com/mr-tron/base58"
 	"github.com/spf13/cobra"
 )

View file

@@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
 var sealWritecacheShardCmd = &cobra.Command{
 	Use:   "seal",
 	Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
-	Long:  "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
+	Long:  "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
 	Run:   sealWritecache,
 }

View file

@@ -2,7 +2,6 @@ package object
 import (
 	"fmt"
-	"os"
 	"strconv"
 	"strings"
@@ -10,7 +9,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -22,7 +20,6 @@ const (
 	replaceAttrsFlagName = "replace-attrs"
 	rangeFlagName        = "range"
 	payloadFlagName      = "payload"
-	splitHeaderFlagName  = "split-header"
 )

 var objectPatchCmd = &cobra.Command{
@@ -53,7 +50,6 @@ func initObjectPatchCmd() {
 	flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
 	flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
 	flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
-	flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
 }

 func patch(cmd *cobra.Command, _ []string) {
@@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
 	prm.NewAttributes = newAttrs
 	prm.ReplaceAttribute = replaceAttrs

-	prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
-
 	for i := range ranges {
 		prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
 			Range: ranges[i],
@@ -153,22 +147,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
 	v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
 	return v
 }
-
-func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
-	path, _ := cmd.Flags().GetString(splitHeaderFlagName)
-	if path == "" {
-		return nil
-	}
-
-	data, err := os.ReadFile(path)
-	commonCmd.ExitOnErr(cmd, "read file error: %w", err)
-
-	splitHdrV2 := new(objectV2.SplitHeader)
-	err = splitHdrV2.Unmarshal(data)
-	if err != nil {
-		err = splitHdrV2.UnmarshalJSON(data)
-		commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
-	}
-
-	return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
-}

View file

@@ -2,19 +2,17 @@ package tree
 import (
 	"context"
-	"crypto/tls"
 	"fmt"
+	"strings"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
 	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials/insecure"
 )
@@ -33,29 +31,22 @@ func _client() (tree.TreeServiceClient, error) {
 		return nil, err
 	}

-	host, isTLS, err := client.ParseURI(netAddr.URIAddr())
-	if err != nil {
-		return nil, err
-	}
-
-	creds := insecure.NewCredentials()
-	if isTLS {
-		creds = credentials.NewTLS(&tls.Config{})
-	}
-
 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
-			tracing.NewUnaryClientInterceptor(),
+			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
 			tracing.NewStreamClientInterceptor(),
 		),
 		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
 		grpc.WithDisableServiceConfig(),
-		grpc.WithTransportCredentials(creds),
 	}

-	cc, err := grpc.NewClient(host, opts...)
+	if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
+		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	}
+
+	cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
 	return tree.NewTreeServiceClient(cc), err
 }
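The two sides differ in how TLS transport credentials are chosen: master parses the address with the SDK's client.ParseURI helper and always sets explicit credentials, while the feature side only appends insecure credentials when the address lacks the "grpcs:" prefix. A minimal sketch of scheme-based credential selection, using url.Parse as an illustrative stand-in for the SDK helper:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/url"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/credentials/insecure"
    )

    func dial(uri string) (*grpc.ClientConn, error) {
        u, err := url.Parse(uri)
        if err != nil {
            return nil, err
        }
        // Plaintext by default, TLS when the scheme says so.
        creds := insecure.NewCredentials()
        if u.Scheme == "grpcs" {
            creds = credentials.NewTLS(&tls.Config{})
        }
        return grpc.NewClient(u.Host, grpc.WithTransportCredentials(creds))
    }

    func main() {
        // grpc.NewClient does not dial eagerly, so this runs offline.
        cc, err := dial("grpcs://node.example:8802")
        fmt.Println(cc != nil, err)
    }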

View file

@@ -9,7 +9,6 @@ import (
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/spf13/viper"
 	"go.uber.org/zap"
 )
@@ -39,14 +38,13 @@ func reloadConfig() error {
 	}
 	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
 	audit.Store(cfg.GetBool("audit.enabled"))
-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(cfg.GetString("logger.level"))
 	if err != nil {
 		return err
 	}
-	log.Reload(logPrm)
-	return nil
+	logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")

+	return logPrm.Reload()
 }

 func watchForSignal(ctx context.Context, cancel func()) {
func watchForSignal(ctx context.Context, cancel func()) { func watchForSignal(ctx context.Context, cancel func()) {

View file

@@ -31,6 +31,7 @@ const (
 var (
 	wg         = new(sync.WaitGroup)
 	intErr     = make(chan error) // internal inner ring errors
+	logPrm     = new(logger.Prm)
 	innerRing  *innerring.Server
 	pprofCmp   *pprofComponent
 	metricsCmp *httpComponent
@@ -69,7 +70,6 @@ func main() {
 	metrics := irMetrics.NewInnerRingMetrics()

-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(
 		cfg.GetString("logger.level"),
 	)

View file

@@ -1,8 +1,6 @@
 package tui

 import (
-	"slices"
-
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
@@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
 	// Used history data for search prompt, so just make that data recent.
 	if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
-		f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
+		f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
 		f.history = append(f.history, s)
 	}
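slices.Delete (Go 1.21+) on the master side and the append-based removal on the feature side are equivalent: both shift the tail left over the removed element. A tiny runnable comparison:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        history := []string{"a", "b", "c", "d"}
        i := 1 // index of the entry to move to the end, as AddToHistory does

        s := history[i]

        // Pre-generics idiom (feature side):
        //   history = append(history[:i], history[i+1:]...)
        // Standard-library form (master side), removing history[i:i+1]:
        history = slices.Delete(history, i, i+1)

        history = append(history, s)
        fmt.Println(history) // [a c d b]
    }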

View file

@ -108,7 +108,6 @@ type applicationConfiguration struct {
level string level string
destination string destination string
timestamp bool timestamp bool
options []zap.Option
} }
ObjectCfg struct { ObjectCfg struct {
@ -118,6 +117,7 @@ type applicationConfiguration struct {
EngineCfg struct { EngineCfg struct {
errorThreshold uint32 errorThreshold uint32
shardPoolSize uint32
shards []shardCfg shards []shardCfg
lowMem bool lowMem bool
} }
@ -233,14 +233,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
var opts []zap.Option
if loggerconfig.ToLokiConfig(c).Enabled {
opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
return lokiCore
})}
}
a.LoggerCfg.options = opts
// Object // Object
@ -258,6 +250,7 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
// Storage Engine // Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
@ -482,6 +475,7 @@ type shared struct {
// dynamicConfiguration stores parameters of the // dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations. // components that supports runtime reconfigurations.
type dynamicConfiguration struct { type dynamicConfiguration struct {
logger *logger.Prm
pprof *httpComponent pprof *httpComponent
metrics *httpComponent metrics *httpComponent
} }
@ -722,11 +716,16 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector netState.metrics = c.metricsCollector
logPrm, err := c.loggerPrm() logPrm := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm) log, err := logger.NewLogger(logPrm)
fatalOnErr(err) fatalOnErr(err)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
}
c.internals = initInternals(appCfg, log) c.internals = initInternals(appCfg, log)
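The Loki hook on the right-hand side relies on zap's core-wrapping mechanism. A minimal sketch of that mechanism in plain zap (a no-op core stands in for a real Loki-forwarding core; note that `zap.Logger.WithOptions` returns a *new* logger, so in plain zap its result needs to be kept — the frostfs logger wrapper may behave differently):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base, _ := zap.NewProduction()
	// WrapCore swaps the logger's core; NewTee keeps the original core and
	// duplicates every entry to a second one.
	wrapped := base.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
		return zapcore.NewTee(c, zapcore.NewNopCore())
	}))
	wrapped.Info("sent to both cores")
}
```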
@ -894,6 +893,7 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option var opts []engine.Option
opts = append(opts, opts = append(opts,
engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold), engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log), engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
@ -933,7 +933,6 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync), writecache.WithNoSync(wcRead.noSync),
writecache.WithLogger(c.log), writecache.WithLogger(c.log),
writecache.WithQoSLimiter(shCfg.limiter),
) )
} }
return writeCacheOpts return writeCacheOpts
@ -1049,7 +1048,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
} }
if c.metricsCollector != nil { if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics()))) mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
} }
var sh shardOptsWithID var sh shardOptsWithID
@ -1079,23 +1077,26 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh return sh
} }
func (c *cfg) loggerPrm() (logger.Prm, error) { func (c *cfg) loggerPrm() *logger.Prm {
var prm logger.Prm // check if it has been inited before
// (re)init read configuration if c.dynamicConfiguration.logger == nil {
err := prm.SetLevelString(c.LoggerCfg.level) c.dynamicConfiguration.logger = new(logger.Prm)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
} }
err = prm.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
}
prm.PrependTimestamp = c.LoggerCfg.timestamp
prm.Options = c.LoggerCfg.options
return prm, nil // (re)init read configuration
err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log level format: " + c.LoggerCfg.level)
}
err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
} }
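The rewritten `loggerPrm` memoizes a single `logger.Prm` inside `dynamicConfiguration`, so reloads mutate the same object the logger already holds. A minimal sketch of the lazy-init-then-update pattern (hypothetical types, not the node's actual ones):

```go
package main

import "fmt"

type prm struct{ level string }

type cfg struct{ logger *prm }

// loggerPrm lazily allocates the shared parameter object once, then keeps
// updating it in place, so every holder of the pointer sees each reload.
func (c *cfg) loggerPrm(level string) *prm {
	if c.logger == nil {
		c.logger = new(prm)
	}
	c.logger.level = level
	return c.logger
}

func main() {
	c := &cfg{}
	p := c.loggerPrm("info")
	c.loggerPrm("debug") // reload mutates the same object
	fmt.Println(p.level) // debug
}
```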
func (c *cfg) LocalAddress() network.AddressGroup { func (c *cfg) LocalAddress() network.AddressGroup {
@ -1335,7 +1336,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support // all the components are expected to support
// Logger's dynamic reconfiguration approach // Logger's dynamic reconfiguration approach
components := c.getComponents(ctx) // Logger
logPrm := c.loggerPrm()
components := c.getComponents(ctx, logPrm)
// Object // Object
c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@ -1373,17 +1378,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
} }
func (c *cfg) getComponents(ctx context.Context) []dCmp { func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
var components []dCmp var components []dCmp
components = append(components, dCmp{"logger", func() error { components = append(components, dCmp{"logger", logPrm.Reload})
prm, err := c.loggerPrm()
if err != nil {
return err
}
c.log.Reload(prm)
return nil
}})
components = append(components, dCmp{"runtime", func() error { components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(ctx, c) setRuntimeParameters(ctx, c)
return nil return nil


@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) { func TestConfigDir(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
cfgFileName := path.Join(dir, "cfg_01.yml") cfgFileName0 := path.Join(dir, "cfg_00.json")
cfgFileName1 := path.Join(dir, "cfg_01.yml")
require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "") c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
} }


@ -11,6 +11,10 @@ import (
const ( const (
subsection = "storage" subsection = "storage"
// ShardPoolSizeDefault is a default value of routine pool size per-shard to
// process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
) )
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found. // ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil return nil
} }
// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
//
// Returns ShardPoolSizeDefault if the value is not a positive number.
func ShardPoolSize(c *config.Config) uint32 {
v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
if v > 0 {
return v
}
return ShardPoolSizeDefault
}
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
// //
// Returns 0 if the value is missing. // Returns 0 if the value is missing.
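`ShardPoolSize` above follows a common positive-or-default pattern for config reads. A compact sketch of just that pattern (names are illustrative):

```go
package main

import "fmt"

const shardPoolSizeDefault = 20

// positiveOrDefault returns v when it is positive, otherwise def;
// a missing key reads as 0 and therefore also falls back to def.
func positiveOrDefault(v, def uint32) uint32 {
	if v > 0 {
		return v
	}
	return def
}

func main() {
	fmt.Println(positiveOrDefault(15, shardPoolSizeDefault)) // 15
	fmt.Println(positiveOrDefault(0, shardPoolSizeDefault))  // 20
}
```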


@ -54,6 +54,7 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled) require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
}) })
@ -63,6 +64,7 @@ func TestEngineSection(t *testing.T) {
num := 0 num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() { defer func() {
@ -168,10 +170,9 @@ func TestEngineSection(t *testing.T) {
LimitOps: toPtr(25000), LimitOps: toPtr(25000),
}, },
{ {
Tag: "policer", Tag: "policer",
Weight: toPtr(5), Weight: toPtr(5),
LimitOps: toPtr(25000), LimitOps: toPtr(25000),
Prohibited: true,
}, },
}) })
require.ElementsMatch(t, writeLimits.Tags, require.ElementsMatch(t, writeLimits.Tags,


@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration { func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay") d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0) if d < 0 {
d = 0
}
return d
} }
// MaxBatchSize returns the value of "max_batch_size" config parameter. // MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int { func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0) if s < 0 {
s = 0
}
return s
} }
// NoSync returns the value of "no_sync" config parameter. // NoSync returns the value of "no_sync" config parameter.
@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int { func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
return max(s, 0) if s < 0 {
s = 0
}
return s
} }
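Both sides of these hunks clamp non-positive config values to zero; the master side uses the built-in `max` (available since Go 1.21), which works on any ordered type, including `time.Duration`. A quick equivalence sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d := -5 * time.Millisecond

	// Explicit clamp, as written on the feature-branch side.
	clamped := d
	if clamped < 0 {
		clamped = 0
	}

	// Built-in max (Go 1.21+), as written on the master side.
	fmt.Println(max(d, 0) == clamped) // true
}
```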


@ -84,7 +84,6 @@ type IOTagConfig struct {
Weight *float64 Weight *float64
LimitOps *float64 LimitOps *float64
ReservedOps *float64 ReservedOps *float64
Prohibited bool
} }
func tags(c *config.Config) []IOTagConfig { func tags(c *config.Config) []IOTagConfig {
@ -120,13 +119,6 @@ func tags(c *config.Config) []IOTagConfig {
tagConfig.ReservedOps = &r tagConfig.ReservedOps = &r
} }
v = c.Value(strconv.Itoa(i) + ".prohibited")
if v != nil {
r, err := cast.ToBoolE(v)
panicOnErr(err)
tagConfig.Prohibited = r
}
result = append(result, tagConfig) result = append(result, tagConfig)
} }
} }


@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration { func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay") d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0) if d <= 0 {
d = 0
}
return d
} }
// MaxBatchSize returns the value of "max_batch_size" config parameter. // MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int { func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0) if s <= 0 {
s = 0
}
return s
} }


@ -31,11 +31,12 @@ func Limits(c *config.Config) []LimitConfig {
break break
} }
if sc.Value("max_ops") == nil { maxOps := config.IntSafe(sc, "max_ops")
if maxOps == 0 {
panic("no max operations for method group") panic("no max operations for method group")
} }
limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) limits = append(limits, LimitConfig{methods, maxOps})
} }
return limits return limits
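The two sides differ in how they detect a missing `max_ops`: `sc.Value(...) == nil` distinguishes an absent key from an explicit `0`, while `config.IntSafe(...) == 0` conflates the two — which is why the `zero_max_ops` test below exists only on the master side. A generic illustration of the difference:

```go
package main

import "fmt"

func main() {
	m := map[string]int{"max_ops": 0} // key present, explicitly zero

	// Presence check: tells "missing" apart from "explicit 0".
	if _, ok := m["max_ops"]; !ok {
		fmt.Println("missing")
	} else {
		fmt.Println("present") // printed
	}

	// Zero-value check: an explicit 0 is indistinguishable from a
	// missing key and would be rejected the same way.
	if m["max_ops"] == 0 {
		fmt.Println("rejected: no max operations") // printed
	}
}
```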


@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) {
}) })
t.Run("no max operations", func(t *testing.T) { t.Run("no max operations", func(t *testing.T) {
const path = "testdata/no_max_ops" const path = "testdata/node"
fileConfigTest := func(c *config.Config) { fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) }) require.Panics(t, func() { _ = Limits(c) })
@ -50,28 +50,4 @@ func TestRPCSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest) configtest.ForEnvFileType(t, path, fileConfigTest)
}) })
}) })
t.Run("zero max operations", func(t *testing.T) {
const path = "testdata/zero_max_ops"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(0))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
} }


@ -1,4 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=0
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000


@ -1,19 +0,0 @@
{
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
],
"max_ops": 0
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
}
}


@ -1,9 +0,0 @@
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 0
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000


@ -16,6 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@ -171,10 +172,12 @@ func initObjectService(c *cfg) {
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
apeSvc := createAPEService(c, &irFetcher, splitSvc) apeSvc := createAPEService(c, splitSvc)
aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
var commonSvc objectService.Common var commonSvc objectService.Common
commonSvc.Init(&c.internals, apeSvc) commonSvc.Init(&c.internals, aclSvc)
respSvc := objectService.NewResponseService( respSvc := objectService.NewResponseService(
&commonSvc, &commonSvc,
@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
}) })
} }
func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
return &innerRingFetcherWithNotary{ return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client, sidechain: c.cfgMorph.client,
} }
@ -426,7 +429,17 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
) )
} }
func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
return v2.New(
apeSvc,
c.netMapSource,
irFetcher,
c.cfgObject.cnrSource,
v2.WithLogger(c.log),
)
}
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService( return objectAPE.NewService(
objectAPE.NewChecker( objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
@ -438,7 +451,6 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
c.cfgObject.cnrSource, c.cfgObject.cnrSource,
c.binPublicKey, c.binPublicKey,
), ),
objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
splitSvc, splitSvc,
) )
} }
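The wiring above layers the object service like middleware: on the feature branch the chain is split → APE → ACLv2 → common → response, while master folds the request-info extraction into the APE service and drops the separate ACLv2 layer. A toy decorator chain showing only the ordering idea (names are illustrative):

```go
package main

import "fmt"

// handler stands in for the object service interface; each layer wraps
// the next one, so a request passes through them outermost-first.
type handler func(req string) string

func layer(name string, next handler) handler {
	return func(req string) string { return name + "(" + next(req) + ")" }
}

func main() {
	split := handler(func(req string) string { return "split:" + req })
	ape := layer("ape", split)     // policy checks
	acl := layer("acl", ape)       // basic ACL checks (feature branch only)
	common := layer("common", acl) // node availability checks
	fmt.Println(common("Get"))     // common(acl(ape(split:Get)))
}
```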


@ -43,14 +43,11 @@ func initQoSService(c *cfg) {
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx) rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined { if !defined {
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
} }
ioTag, err := qos.FromRawString(rawTag) ioTag, err := qos.FromRawString(rawTag)
if err != nil { if err != nil {
s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
} }
@ -73,36 +70,26 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return ctx return ctx
} }
} }
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal: case qos.IOTagInternal:
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { for _, pk := range s.allowedInternalPubs {
return ctx if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
} }
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default: default:
s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
} }
} }
func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, publicKey) {
return true
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return false
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), publicKey) {
return true
}
}
return false
}
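Master extracts the repeated lookup into `isInternalIOTagPublicKey`: a request key counts as internal if it is in the static allow-list or belongs to a node in the current netmap. A self-contained sketch of that membership check (simplified types; the real code queries a netmap source per epoch):

```go
package main

import (
	"bytes"
	"fmt"
)

// isInternalKey reports whether pub is an allowed internal key or the
// key of a node present in the current network map.
func isInternalKey(pub []byte, allowed, netmapKeys [][]byte) bool {
	for _, pk := range allowed {
		if bytes.Equal(pk, pub) {
			return true
		}
	}
	for _, pk := range netmapKeys {
		if bytes.Equal(pk, pub) {
			return true
		}
	}
	return false
}

func main() {
	node := []byte{0x02, 0xbe}
	fmt.Println(isInternalKey(node, nil, [][]byte{node}))         // true
	fmt.Println(isInternalKey([]byte{0x03}, nil, [][]byte{node})) // false
}
```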


@ -1,226 +0,0 @@
package main
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSService_Client(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag client defined", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func TestQoSService_Internal(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
}
func TestQoSService_Critical(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
}
func TestQoSService_NetmapGetError(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
s.netmapSource = &utilTesting.TestNetmapSource{}
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
nmSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
reqSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedCritSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedIntSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
var node netmap.NodeInfo
node.SetPublicKey(nmSigner.PublicKey().Bytes())
nm := &netmap.NetMap{}
nm.SetEpoch(100)
nm.SetNodes([]netmap.NodeInfo{node})
return &cfgQoSService{
logger: test.NewLogger(t),
netmapSource: &utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
100: nm,
},
CurrentEpoch: 100,
},
allowedCriticalPubs: [][]byte{
allowedCritSigner.PublicKey().Bytes(),
},
allowedInternalPubs: [][]byte{
allowedIntSigner.PublicKey().Bytes(),
},
},
&testQoSServicePublicKeys{
NetmapNode: nmSigner.PublicKey().Bytes(),
Request: reqSigner.PublicKey().Bytes(),
Internal: allowedIntSigner.PublicKey().Bytes(),
Critical: allowedCritSigner.PublicKey().Bytes(),
}
}
type testQoSServicePublicKeys struct {
NetmapNode []byte
Request []byte
Internal []byte
Critical []byte
}


@ -1,6 +1,7 @@
package main package main
import ( import (
"os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
}) })
}) })
t.Run("mainnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
t.Run("testnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
} }


@ -97,6 +97,7 @@ FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section # Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard ## 0 shard
### Flag to refill Metabase from BlobStor ### Flag to refill Metabase from BlobStor
@ -180,7 +181,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0


@ -158,6 +158,7 @@
] ]
}, },
"storage": { "storage": {
"shard_pool_size": 15,
"shard_ro_error_threshold": 100, "shard_ro_error_threshold": 100,
"shard": { "shard": {
"0": { "0": {
@ -252,8 +253,7 @@
{ {
"tag": "policer", "tag": "policer",
"weight": 5, "weight": 5,
"limit_ops": 25000, "limit_ops": 25000
"prohibited": true
} }
] ]
}, },


@ -135,6 +135,7 @@ rpc:
storage: storage:
# note: shard configuration can be omitted for relay node (see `node.relay`) # note: shard configuration can be omitted for relay node (see `node.relay`)
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard: shard:
@ -148,7 +149,7 @@ storage:
flush_worker_count: 30 # number of write-cache flusher threads flush_worker_count: 30 # number of write-cache flusher threads
metabase: metabase:
perm: 0o644 # permissions for metabase files(directories: +x for current user and group) perm: 0644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200 max_batch_size: 200
max_batch_delay: 20ms max_batch_delay: 20ms
@ -161,13 +162,13 @@ storage:
blobstor: blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS depth: 5 # max depth of object tree storage in FS
gc: gc:
@ -249,7 +250,6 @@ storage:
- tag: policer - tag: policer
weight: 5 weight: 5
limit_ops: 25000 limit_ops: 25000
prohibited: true
write: write:
max_running_ops: 1000 max_running_ops: 1000
max_waiting_ops: 100 max_waiting_ops: 100
@ -291,7 +291,7 @@ storage:
pilorama: pilorama:
path: tmp/1/blob/pilorama.db path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
perm: 0o644 # permission to use for the database file and intermediate directories perm: 0644 # permission to use for the database file and intermediate directories
tracing: tracing:
enabled: true enabled: true

28
config/mainnet/README.md Normal file

@ -0,0 +1,28 @@
# N3 Mainnet Storage node configuration
Here is a template for a simple storage node configuration in N3 Mainnet.
Make sure to specify correct values instead of the `<...>` placeholders.
Do not change the `contracts` section. Run the latest frostfs-node release with
the finished config: `frostfs-node -c config.yml`.
To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
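A deposit is a plain NEP-17 GAS transfer to that contract address. A sketch with `neo-go`, mirroring the Testnet instructions elsewhere in this repo — the RPC endpoint and source address are placeholders you must fill in:

```
neo-go wallet nep17 transfer -w wallet.json -r <n3-mainnet-rpc-endpoint> \
        --from <your-address> \
        --to NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk \
        --token GAS \
        --amount 1
```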
## Tips
Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server.
```yaml
node:
addresses:
- grpcs://frostfs.my.org:8080
grpc:
num: 1
0:
endpoint: frostfs.my.org:8080
tls:
enabled: true
certificate: /path/to/cert
key: /path/to/key
```

70
config/mainnet/config.yml Normal file

@ -0,0 +1,70 @@
node:
wallet:
path: <path/to/wallet>
address: <address-in-wallet>
password: <password>
addresses:
- <announced.address:port>
attribute_0: UN-LOCODE:<XX YYY>
attribute_1: Price:100000
attribute_2: User-Agent:FrostFS\/0.9999
grpc:
num: 1
0:
endpoint: <listen.local.address:port>
tls:
enabled: false
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/path/metabase
perm: 0600
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m
logger:
level: info
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
object:
put:
remote_pool_size: 100
local_pool_size: 100
morph:
rpc_endpoint:
- wss://rpc1.morph.frostfs.info:40341/ws
- wss://rpc2.morph.frostfs.info:40341/ws
- wss://rpc3.morph.frostfs.info:40341/ws
- wss://rpc4.morph.frostfs.info:40341/ws
- wss://rpc5.morph.frostfs.info:40341/ws
- wss://rpc6.morph.frostfs.info:40341/ws
- wss://rpc7.morph.frostfs.info:40341/ws
dial_timeout: 20s
contracts:
balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1

129
config/testnet/README.md Normal file

@ -0,0 +1,129 @@
# N3 Testnet Storage node configuration
There is a prepared configuration for NeoFS Storage Node deployment in
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
docker image and run it with docker-compose.
## Build image
The prepared **frostfs-storage-testnet** image is available on Docker Hub.
However, if you need to rebuild it for some reason, run
the `make image-storage-testnet` command.
```
$ make image-storage-testnet
...
Successfully built ab0557117b02
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
```
## Deploy node
To run a storage node in N3 Testnet environment, you should deposit GAS assets,
update docker-compose file and start the node.
### Deposit
The Storage Node owner should deposit GAS to the NeoFS smart contract. This generates a
bit of sidechain GAS in the node's wallet; sidechain GAS is used to send the bootstrap tx.
First, obtain GAS in the N3 Testnet chain. You can do that with the
[faucet](https://neowish.ngd.network) service.
Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
You can provide a script hash in the `data` argument of the transfer tx to make a
deposit to a specified account. Otherwise, the deposit is made to the tx sender.
The NeoFS contract script hash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
See a deposit example with `neo-go`.
```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
--token GAS \
--amount 1
```
### Configure
Next, configure the `node_config.env` file. Change the endpoint values. Both
should contain your **public** IP.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
```
Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
attribute.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```
You can validate the UN/LOCODE attribute against the
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.
```
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
Country: Russia
Location: Saint Petersburg (ex Leningrad)
Continent: Europe
Subdivision: [SPE] Sankt-Peterburg
Coordinates: 59.53, 30.15
```
It is recommended to pass the node's key as a file. To do so, convert your wallet
WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.
```
// Print WIF in a 32-byte hex format
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
// Save 32-byte hex into a file
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
```
Then, specify the path to this file in `docker-compose.yml`
```yaml
volumes:
- frostfs_storage:/storage
- ./my_wallet.key:/node.key
```
NeoFS objects will be stored on your machine. By default, docker-compose
is configured to store objects in the named docker volume `frostfs_storage`. You can
specify a directory on the filesystem to store objects in instead.
```yaml
volumes:
- /home/username/frostfs/rc3/storage:/storage
- ./my_wallet.key:/node.key
```
### Start
Run the node with `docker-compose up` command and stop it with `docker-compose down`.
### Debug
To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
the log, set the log level to debug with this env:
```yaml
environment:
- NEOFS_LOGGER_LEVEL=debug
```

52
config/testnet/config.yml Normal file

@ -0,0 +1,52 @@
logger:
level: info
morph:
rpc_endpoint:
- wss://rpc01.morph.testnet.frostfs.info:51331/ws
- wss://rpc02.morph.testnet.frostfs.info:51331/ws
- wss://rpc03.morph.testnet.frostfs.info:51331/ws
- wss://rpc04.morph.testnet.frostfs.info:51331/ws
- wss://rpc05.morph.testnet.frostfs.info:51331/ws
- wss://rpc06.morph.testnet.frostfs.info:51331/ws
- wss://rpc07.morph.testnet.frostfs.info:51331/ws
dial_timeout: 20s
contracts:
balance: e0420c216003747626670d1424569c17c79015bf
container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
netmap: d4b331639799e2958d4bc5b711b469d79de94e01
node:
key: /node.key
attribute_0: Deployed:SelfHosted
attribute_1: User-Agent:FrostFS\/0.9999
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/metabase
perm: 0777
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m


@ -51,7 +51,10 @@ However, all mode changing operations are idempotent.
## Automatic mode changes ## Automatic mode changes
A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold. A shard can automatically switch to a `degraded-read-only` mode in 3 cases:
1. If the metabase was not available or couldn't be opened/initialized during shard startup.
2. If the shard's error counter exceeds the threshold (sketched below).
3. If the metabase couldn't be reopened during SIGHUP handling.
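A minimal sketch of case 2, the error-counter rule — hypothetical names; the real logic lives in the storage engine:

```go
package main

import "fmt"

type shard struct {
	errCount  uint32
	threshold uint32 // shard_ro_error_threshold; 0 means "ignore errors"
	mode      string
}

// reportError bumps the counter and trips the shard into read-only mode
// once the configured threshold is reached.
func (s *shard) reportError() {
	s.errCount++
	if s.threshold > 0 && s.errCount >= s.threshold {
		s.mode = "read-only"
	}
}

func main() {
	s := &shard{threshold: 3, mode: "read-write"}
	for range 3 {
		s.reportError()
	}
	fmt.Println(s.mode) // read-only
}
```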
# Detach shard # Detach shard


@ -170,6 +170,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description | | Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------| |----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. | | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. | | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. | | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@ -209,7 +210,7 @@ blobstor:
width: 4 width: 4
- type: fstree - type: fstree
path: /path/to/blobstor/blobovnicza path: /path/to/blobstor/blobovnicza
perm: 0o644 perm: 0644
size: 4194304 size: 4194304
depth: 1 depth: 1
width: 4 width: 4
@ -269,7 +270,7 @@ gc:
```yaml ```yaml
metabase: metabase:
path: /path/to/meta.db path: /path/to/meta.db
perm: 0o644 perm: 0644
max_batch_size: 200 max_batch_size: 200
max_batch_delay: 20ms max_batch_delay: 20ms
``` ```
@ -359,7 +360,6 @@ limits:
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. | | `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | | `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | | `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
# `node` section # `node` section

10
go.mod

@ -1,15 +1,15 @@
module git.frostfs.info/TrueCloudLab/frostfs-node module git.frostfs.info/TrueCloudLab/frostfs-node
go 1.23 go 1.22
require ( require (
code.gitea.io/sdk/gitea v0.17.1 code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b

16
go.sum

@ -4,14 +4,14 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 h1:QnAt5b2R6+hQthMOIn5ECfLAlVD8IAE5JRm1NCCOmuE=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 h1:zM2l316J55h9p30snl6vHBI/h0xmnuqZjnxIjRDtJZw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=


@ -512,7 +512,5 @@ const (
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
FailedToParseIncomingIOTag = "failed to parse incoming IO tag" FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
) )


@ -23,7 +23,6 @@ const (
policerSubsystem = "policer" policerSubsystem = "policer"
commonCacheSubsystem = "common_cache" commonCacheSubsystem = "common_cache"
multinetSubsystem = "multinet" multinetSubsystem = "multinet"
qosSubsystem = "qos"
successLabel = "success" successLabel = "success"
shardIDLabel = "shard_id" shardIDLabel = "shard_id"
@ -44,7 +43,6 @@ const (
hitLabel = "hit" hitLabel = "hit"
cacheLabel = "cache" cacheLabel = "cache"
sourceIPLabel = "source_ip" sourceIPLabel = "source_ip"
ioTagLabel = "io_tag"
readWriteMode = "READ_WRITE" readWriteMode = "READ_WRITE"
readOnlyMode = "READ_ONLY" readOnlyMode = "READ_ONLY"


@ -26,7 +26,6 @@ type NodeMetrics struct {
morphCache *morphCacheMetrics morphCache *morphCacheMetrics
log logger.LogMetrics log logger.LogMetrics
multinet *multinetMetrics multinet *multinetMetrics
qos *QoSMetrics
// nolint: unused // nolint: unused
appInfo *ApplicationInfo appInfo *ApplicationInfo
} }
@ -56,7 +55,6 @@ func NewNodeMetrics() *NodeMetrics {
log: logger.NewLogMetrics(namespace), log: logger.NewLogMetrics(namespace),
appInfo: NewApplicationInfo(misc.Version), appInfo: NewApplicationInfo(misc.Version),
multinet: newMultinetMetrics(namespace), multinet: newMultinetMetrics(namespace),
qos: newQoSMetrics(),
} }
} }
@ -128,7 +126,3 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
return m.multinet return m.multinet
} }
func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
return m.qos
}


@ -9,14 +9,13 @@ import (
) )
type ObjectServiceMetrics interface { type ObjectServiceMetrics interface {
AddRequestDuration(method string, d time.Duration, success bool, ioTag string) AddRequestDuration(method string, d time.Duration, success bool)
AddPayloadSize(method string, size int) AddPayloadSize(method string, size int)
} }
type objectServiceMetrics struct { type objectServiceMetrics struct {
methodDuration *prometheus.HistogramVec methodDuration *prometheus.HistogramVec
payloadCounter *prometheus.CounterVec payloadCounter *prometheus.CounterVec
ioTagOpsCounter *prometheus.CounterVec
} }
func newObjectServiceMetrics() *objectServiceMetrics { func newObjectServiceMetrics() *objectServiceMetrics {
@ -33,24 +32,14 @@ func newObjectServiceMetrics() *objectServiceMetrics {
Name: "request_payload_bytes", Name: "request_payload_bytes",
Help: "Object Service request payload", Help: "Object Service request payload",
}, []string{methodLabel}), }, []string{methodLabel}),
ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: objectSubsystem,
Name: "requests_total",
Help: "Count of requests for each IO tag",
}, []string{methodLabel, ioTagLabel}),
} }
} }
func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) { func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
m.methodDuration.With(prometheus.Labels{ m.methodDuration.With(prometheus.Labels{
methodLabel: method, methodLabel: method,
successLabel: strconv.FormatBool(success), successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds()) }).Observe(d.Seconds())
m.ioTagOpsCounter.With(prometheus.Labels{
ioTagLabel: ioTag,
methodLabel: method,
}).Inc()
} }
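The removed `ioTagOpsCounter` is an ordinary Prometheus counter vector keyed by method and IO tag. A stripped-down sketch of the same construction (namespace/subsystem values here are illustrative, not the node's actual constants):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	ioTagOps := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "frostfs_node",
		Subsystem: "object",
		Name:      "requests_total",
		Help:      "Count of requests for each IO tag",
	}, []string{"method", "io_tag"})
	prometheus.MustRegister(ioTagOps)

	// One Put request tagged as client traffic.
	ioTagOps.With(prometheus.Labels{"method": "Put", "io_tag": "client"}).Inc()
}
```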
func (m *objectServiceMetrics) AddPayloadSize(method string, size int) { func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {


@ -1,52 +0,0 @@
package metrics
import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
"github.com/prometheus/client_golang/prometheus"
)
type QoSMetrics struct {
opsCounter *prometheus.GaugeVec
}
func newQoSMetrics() *QoSMetrics {
return &QoSMetrics{
opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: qosSubsystem,
Name: "operations_total",
Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
}
}
func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "pending",
}).Set(float64(pending))
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "in_progress",
}).Set(float64(inProgress))
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "completed",
}).Set(float64(completed))
m.opsCounter.With(prometheus.Labels{
shardIDLabel: shardID,
operationLabel: operation,
ioTagLabel: tag,
typeLabel: "resource_exhausted",
}).Set(float64(resourceExhausted))
}
func (m *QoSMetrics) Close(shardID string) {
m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
}


@@ -12,14 +12,12 @@ type TreeMetricsRegister interface {
AddReplicateTaskDuration(time.Duration, bool)
AddReplicateWaitDuration(time.Duration, bool)
AddSyncDuration(time.Duration, bool)
-AddOperation(string, string)
}

type treeServiceMetrics struct {
replicateTaskDuration *prometheus.HistogramVec
replicateWaitDuration *prometheus.HistogramVec
syncOpDuration *prometheus.HistogramVec
-ioTagOpsCounter *prometheus.CounterVec
}

var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)

@@ -44,12 +42,6 @@ func newTreeServiceMetrics() *treeServiceMetrics {
Name: "sync_duration_seconds",
Help: "Duration of synchronization operations",
}, []string{successLabel}),
-ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
-Namespace: namespace,
-Subsystem: treeServiceSubsystem,
-Name: "requests_total",
-Help: "Count of requests for each IO tag",
-}, []string{methodLabel, ioTagLabel}),
}
}

@@ -70,10 +62,3 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
successLabel: strconv.FormatBool(success),
}).Observe(d.Seconds())
}
-
-func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
-m.ioTagOpsCounter.With(prometheus.Labels{
-ioTagLabel: ioTag,
-methodLabel: op,
-}).Inc()
-}

@@ -26,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor
if err != nil {
tag = IOTagClient
}
-if tag.IsLocal() {
+if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
@@ -44,7 +44,7 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto
if err != nil {
tag = IOTagClient
}
-if tag.IsLocal() {
+if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
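A hedged wiring sketch for the client interceptors in this hunk, assuming the internal qos package is importable (it is internal to the frostfs-node module) and using a placeholder address. grpc.WithChainUnaryInterceptor and grpc.WithChainStreamInterceptor are standard grpc-go options; grpc.NewClient requires grpc-go 1.63 or later (older versions would use grpc.Dial).

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

func main() {
	// Outgoing requests inherit the caller's IO tag; local background
	// tags are downgraded to "internal" by the interceptor, as above.
	conn, err := grpc.NewClient(
		"localhost:8080", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()),
		grpc.WithChainStreamInterceptor(qos.NewAdjustOutgoingIOTagStreamClientInterceptor()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	_ = conn
}
```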

@@ -1,219 +0,0 @@
package qos_test
import (
"context"
"errors"
"fmt"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
)
const (
okKey = "ok"
)
var (
errTest = errors.New("mock")
errWrongTag = errors.New("wrong tag")
errNoTag = errors.New("failed to get tag from context")
errResExhausted *apistatus.ResourceExhausted
tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
)
type mockGRPCServerStream struct {
grpc.ServerStream
ctx context.Context
}
func (m *mockGRPCServerStream) Context() context.Context {
return m.ctx
}
type limiter struct {
acquired bool
released bool
}
func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
l.acquired = true
if key != okKey {
return nil, false
}
return func() { l.released = true }, true
}
func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
handler := func(ctx context.Context, req any) (any, error) {
return nil, errTest
}
_, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
return err
}
func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
handler := func(srv any, stream grpc.ServerStream) error {
return errTest
}
err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
FullMethod: methodName,
}, handler)
return err
}
func Test_MaxActiveRPCLimiter(t *testing.T) {
// UnaryServerInterceptor
t.Run("unary fail", func(t *testing.T) {
var lim limiter
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
require.ErrorAs(t, err, &errResExhausted)
require.True(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("unary pass critical", func(t *testing.T) {
var lim limiter
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
require.ErrorIs(t, err, errTest)
require.False(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("unary pass", func(t *testing.T) {
var lim limiter
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.ErrorIs(t, err, errTest)
require.True(t, lim.acquired)
require.True(t, lim.released)
})
// StreamServerInterceptor
t.Run("stream fail", func(t *testing.T) {
var lim limiter
err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
require.ErrorAs(t, err, &errResExhausted)
require.True(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("stream pass critical", func(t *testing.T) {
var lim limiter
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
err := streamMaxActiveRPCLimiter(ctx, &lim, "")
require.ErrorIs(t, err, errTest)
require.False(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("stream pass", func(t *testing.T) {
var lim limiter
err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.ErrorIs(t, err, errTest)
require.True(t, lim.acquired)
require.True(t, lim.released)
})
}
func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
called := false
handler := func(ctx context.Context, req any) (any, error) {
called = true
if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
return nil, nil
}
return nil, errWrongTag
}
_, err := interceptor(context.Background(), nil, nil, handler)
require.NoError(t, err)
require.True(t, called)
}
func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
// check context with no value
called := false
invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
called = true
if _, ok := tagging.IOTagFromContext(ctx); ok {
return fmt.Errorf("%v: expected no IO tags", errWrongTag)
}
return nil
}
require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
require.True(t, called)
// check context for internal tag
targetTag := qos.IOTagInternal.String()
invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
raw, ok := tagging.IOTagFromContext(ctx)
if !ok {
return errNoTag
}
if raw != targetTag {
return errWrongTag
}
return nil
}
for _, tag := range tags {
ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
}
// check context for client tag
ctx := tagging.ContextWithIOTag(context.Background(), "")
targetTag = qos.IOTagClient.String()
require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
}
func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
// check context with no value
called := false
streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
called = true
if _, ok := tagging.IOTagFromContext(ctx); ok {
return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
}
return nil, nil
}
_, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
require.True(t, called)
require.NoError(t, err)
// check context for internal tag
targetTag := qos.IOTagInternal.String()
streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
raw, ok := tagging.IOTagFromContext(ctx)
if !ok {
return nil, errNoTag
}
if raw != targetTag {
return nil, errWrongTag
}
return nil, nil
}
for _, tag := range tags {
ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
_, err := interceptor(ctx, nil, nil, "", streamer, nil)
require.NoError(t, err)
}
// check context for client tag
ctx := tagging.ContextWithIOTag(context.Background(), "")
targetTag = qos.IOTagClient.String()
_, err = interceptor(ctx, nil, nil, "", streamer, nil)
require.NoError(t, err)
}

@@ -4,8 +4,6 @@ import (
"context"
"errors"
"fmt"
-"sync"
-"sync/atomic"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
@@ -17,9 +15,6 @@ import (
const (
defaultIdleTimeout time.Duration = 0
defaultShare float64 = 1.0
-minusOne = ^uint64(0)
-
-defaultMetricsCollectTimeout = 5 * time.Second
)

type ReleaseFunc scheduling.ReleaseFunc

@@ -27,8 +22,6 @@ type ReleaseFunc scheduling.ReleaseFunc
type Limiter interface {
ReadRequest(context.Context) (ReleaseFunc, error)
WriteRequest(context.Context) (ReleaseFunc, error)
-SetParentID(string)
-SetMetrics(Metrics)
Close()
}

@@ -41,6 +34,10 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
if err := validateConfig(c); err != nil {
return nil, err
}
+read, write := c.Read(), c.Write()
+if isNoop(read, write) {
+return noopLimiterInstance, nil
+}
readScheduler, err := createScheduler(c.Read())
if err != nil {
return nil, fmt.Errorf("create read scheduler: %w", err)
@@ -49,18 +46,10 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
if err != nil {
return nil, fmt.Errorf("create write scheduler: %w", err)
}
-l := &mClockLimiter{
+return &mClockLimiter{
readScheduler: readScheduler,
writeScheduler: writeScheduler,
-closeCh: make(chan struct{}),
-wg: &sync.WaitGroup{},
-readStats: createStats(),
-writeStats: createStats(),
-}
-l.shardID.Store(&shardID{})
-l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
-l.startMetricsCollect()
-return l, nil
+}, nil
}

func createScheduler(config limits.OpConfig) (scheduler, error) {
@@ -74,7 +63,7 @@ func createScheduler(config limits.OpConfig) (scheduler, error) {
func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
result := make(map[string]scheduling.TagInfo)
-for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
+for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} {
result[tag.String()] = scheduling.TagInfo{
Share: defaultShare,
}
@@ -90,7 +79,6 @@ func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.T
if l.ReservedOps != nil && *l.ReservedOps != 0 {
v.ReservedIOPS = l.ReservedOps
}
-v.Prohibited = l.Prohibited
result[l.Tag] = v
}
return result
@@ -103,7 +91,7 @@ var (
)

func NewNoopLimiter() Limiter {
-return noopLimiterInstance
+return &noopLimiter{}
}

type noopLimiter struct{}

@@ -116,127 +104,43 @@ func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
return releaseStub, nil
}

-func (n *noopLimiter) SetParentID(string) {}
-
func (n *noopLimiter) Close() {}

-func (n *noopLimiter) SetMetrics(Metrics) {}
-
var _ Limiter = (*mClockLimiter)(nil)

-type shardID struct {
-id string
-}
-
type mClockLimiter struct {
readScheduler scheduler
writeScheduler scheduler
-
-readStats map[string]*stat
-writeStats map[string]*stat
-
-shardID atomic.Pointer[shardID]
-metrics atomic.Pointer[metricsHolder]
-closeCh chan struct{}
-wg *sync.WaitGroup
}

func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
-return requestArrival(ctx, n.readScheduler, n.readStats)
+return requestArrival(ctx, n.readScheduler)
}

func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
-return requestArrival(ctx, n.writeScheduler, n.writeStats)
+return requestArrival(ctx, n.writeScheduler)
}

-func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
+func requestArrival(ctx context.Context, s scheduler) (ReleaseFunc, error) {
tag, ok := tagging.IOTagFromContext(ctx)
if !ok {
tag = IOTagClient.String()
}
-stat := getStat(tag, stats)
-stat.pending.Add(1)
if tag == IOTagCritical.String() {
-stat.inProgress.Add(1)
-return func() {
-stat.completed.Add(1)
-}, nil
+return releaseStub, nil
}
rel, err := s.RequestArrival(ctx, tag)
-stat.inProgress.Add(1)
if err != nil {
-if isResourceExhaustedErr(err) {
-stat.resourceExhausted.Add(1)
+if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
+errors.Is(err, errSemaphoreLimitExceeded) {
return nil, &apistatus.ResourceExhausted{}
}
-stat.completed.Add(1)
return nil, err
}
-return func() {
-rel()
-stat.completed.Add(1)
-}, nil
+return ReleaseFunc(rel), nil
}

func (n *mClockLimiter) Close() {
n.readScheduler.Close()
n.writeScheduler.Close()
-close(n.closeCh)
-n.wg.Wait()
-n.metrics.Load().metrics.Close(n.shardID.Load().id)
}
-
-func (n *mClockLimiter) SetParentID(parentID string) {
-n.shardID.Store(&shardID{id: parentID})
-}
-
-func (n *mClockLimiter) SetMetrics(m Metrics) {
-n.metrics.Store(&metricsHolder{metrics: m})
-}
-
-func (n *mClockLimiter) startMetricsCollect() {
-n.wg.Add(1)
-go func() {
-defer n.wg.Done()
-ticker := time.NewTicker(defaultMetricsCollectTimeout)
-defer ticker.Stop()
-for {
-select {
-case <-n.closeCh:
-return
-case <-ticker.C:
-shardID := n.shardID.Load().id
-if shardID == "" {
-continue
-}
-metrics := n.metrics.Load().metrics
-exportMetrics(metrics, n.readStats, shardID, "read")
-exportMetrics(metrics, n.writeStats, shardID, "write")
-}
-}
-}()
-}
-
-func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
-var pending uint64
-var inProgress uint64
-var completed uint64
-var resExh uint64
-for tag, s := range stats {
-pending = s.pending.Load()
-inProgress = s.inProgress.Load()
-completed = s.completed.Load()
-resExh = s.resourceExhausted.Load()
-if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
-continue
-}
-metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
-}
-}
-
-func isResourceExhaustedErr(err error) bool {
-return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
-errors.Is(err, errSemaphoreLimitExceeded) ||
-errors.Is(err, scheduling.ErrTagRequestsProhibited)
-}
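A sketch of the acquire/release contract both versions of the Limiter share: every successful acquisition must be paired with exactly one call of the returned release function. The limits import path is the one visible in the hunk above; note both packages are internal to the frostfs-node module, so this compiles only inside it.

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

// doRead sketches one limited shard read. On the master side the release
// callback additionally updates the per-tag cumulative counters.
func doRead(ctx context.Context, cfg *limits.Config) error {
	lim, err := qos.NewLimiter(cfg)
	if err != nil {
		return err
	}
	defer lim.Close()

	release, err := lim.ReadRequest(ctx)
	if err != nil {
		return err // e.g. *apistatus.ResourceExhausted under overload
	}
	defer release()

	// ... perform the shard read under the acquired slot ...
	return nil
}

func main() {}
```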

@@ -1,31 +0,0 @@
package qos
import "sync/atomic"
type Metrics interface {
SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
Close(shardID string)
}
var _ Metrics = (*noopMetrics)(nil)
type noopMetrics struct{}
func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
}
func (n *noopMetrics) Close(string) {}
// stat presents limiter statistics cumulative counters.
//
// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
type stat struct {
completed atomic.Uint64
pending atomic.Uint64
resourceExhausted atomic.Uint64
inProgress atomic.Uint64
}
type metricsHolder struct {
metrics Metrics
}
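A small runnable illustration of the cumulative-counter convention documented on the stat struct above: the four fields only ever grow, and instantaneous values are derived by subtraction, assuming every operation follows pending -> in_progress -> completed or resource_exhausted.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Mirrors the stat struct above: cumulative counters, never reset.
type stat struct {
	pending           atomic.Uint64
	inProgress        atomic.Uint64
	completed         atomic.Uint64
	resourceExhausted atomic.Uint64
}

func main() {
	var s stat

	// One request arrives, is scheduled, and finishes.
	s.pending.Add(1)
	s.inProgress.Add(1)
	s.completed.Add(1)

	// A second request arrives and is rejected by the scheduler.
	s.pending.Add(1)
	s.resourceExhausted.Add(1)

	// Instantaneous "in flight" falls out of the cumulative values.
	inFlight := s.inProgress.Load() - s.completed.Load()
	fmt.Println(inFlight) // 0
}
```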

@@ -1,29 +0,0 @@
package qos
const unknownStatsTag = "unknown"
var statTags = map[string]struct{}{
IOTagBackground.String(): {},
IOTagClient.String(): {},
IOTagCritical.String(): {},
IOTagInternal.String(): {},
IOTagPolicer.String(): {},
IOTagTreeSync.String(): {},
IOTagWritecache.String(): {},
unknownStatsTag: {},
}
func createStats() map[string]*stat {
result := make(map[string]*stat)
for tag := range statTags {
result[tag] = &stat{}
}
return result
}
func getStat(tag string, stats map[string]*stat) *stat {
if v, ok := stats[tag]; ok {
return v
}
return stats[unknownStatsTag]
}
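The removed stats.go routes any tag outside the fixed set to a shared "unknown" entry, so the stats map never grows and lookups never allocate. A minimal standalone sketch of that lookup-with-fallback pattern:

```go
package main

import "fmt"

const unknownStatsTag = "unknown"

// pick mirrors getStat above: unrecognized tags all share one entry.
func pick(tag string, stats map[string]*int) *int {
	if v, ok := stats[tag]; ok {
		return v
	}
	return stats[unknownStatsTag]
}

func main() {
	known, unknown := 0, 0
	stats := map[string]*int{
		"client":        &known,
		unknownStatsTag: &unknown,
	}

	*pick("client", stats) += 1  // known tag hits its own counter
	*pick("garbage", stats) += 1 // anything else lands in "unknown"

	fmt.Println(known, unknown) // 1 1
}
```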

@@ -1,42 +1,34 @@
package qos

-import (
-"context"
-"fmt"
-
-"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
-)
+import "fmt"

type IOTag string

const (
-IOTagBackground IOTag = "background"
IOTagClient IOTag = "client"
-IOTagCritical IOTag = "critical"
IOTagInternal IOTag = "internal"
-IOTagPolicer IOTag = "policer"
-IOTagTreeSync IOTag = "treesync"
+IOTagBackground IOTag = "background"
IOTagWritecache IOTag = "writecache"
+IOTagPolicer IOTag = "policer"
+IOTagCritical IOTag = "critical"

ioTagUnknown IOTag = ""
)

func FromRawString(s string) (IOTag, error) {
switch s {
-case string(IOTagBackground):
-return IOTagBackground, nil
-case string(IOTagClient):
-return IOTagClient, nil
case string(IOTagCritical):
return IOTagCritical, nil
+case string(IOTagClient):
+return IOTagClient, nil
case string(IOTagInternal):
return IOTagInternal, nil
-case string(IOTagPolicer):
-return IOTagPolicer, nil
-case string(IOTagTreeSync):
-return IOTagTreeSync, nil
+case string(IOTagBackground):
+return IOTagBackground, nil
case string(IOTagWritecache):
return IOTagWritecache, nil
+case string(IOTagPolicer):
+return IOTagPolicer, nil
default:
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
}
@@ -45,15 +37,3 @@ func FromRawString(s string) (IOTag, error) {
func (t IOTag) String() string {
return string(t)
}
-
-func IOTagFromContext(ctx context.Context) string {
-tag, ok := tagging.IOTagFromContext(ctx)
-if !ok {
-tag = "undefined"
-}
-return tag
-}
-
-func (t IOTag) IsLocal() bool {
-return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
-}

@@ -42,12 +42,11 @@ func validateOpConfig(c limits.OpConfig) error {
func validateTags(configTags []limits.IOTagConfig) error {
tags := map[IOTag]tagConfig{
-IOTagBackground: {},
IOTagClient: {},
IOTagInternal: {},
-IOTagPolicer: {},
-IOTagTreeSync: {},
+IOTagBackground: {},
IOTagWritecache: {},
+IOTagPolicer: {},
}
for _, t := range configTags {
tag, err := FromRawString(t.Tag)
@@ -91,3 +90,12 @@ func float64Value(f *float64) float64 {
}
return *f
}
+
+func isNoop(read, write limits.OpConfig) bool {
+return read.MaxRunningOps == limits.NoLimit &&
+read.MaxWaitingOps == limits.NoLimit &&
+write.MaxRunningOps == limits.NoLimit &&
+write.MaxWaitingOps == limits.NoLimit &&
+len(read.Tags) == 0 &&
+len(write.Tags) == 0
+}

@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -411,11 +410,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
-&utilTesting.TestNetmapSource{
-Netmaps: map[uint64]*netmap.NetMap{
+&testNetmapSource{
+netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
-CurrentEpoch: curEpoch,
+currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@@ -484,12 +483,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
-&utilTesting.TestNetmapSource{
-Netmaps: map[uint64]*netmap.NetMap{
+&testNetmapSource{
+netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
-CurrentEpoch: curEpoch,
+currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@@ -560,12 +559,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
-&utilTesting.TestNetmapSource{
-Netmaps: map[uint64]*netmap.NetMap{
+&testNetmapSource{
+netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
-CurrentEpoch: curEpoch,
+currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@@ -597,3 +596,26 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container
func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
return nil, nil
}
+
+type testNetmapSource struct {
+netmaps map[uint64]*netmap.NetMap
+currentEpoch uint64
+}
+
+func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+if diff >= s.currentEpoch {
+return nil, fmt.Errorf("invalid diff")
+}
+return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+}
+
+func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+if nm, found := s.netmaps[epoch]; found {
+return nm, nil
+}
+return nil, fmt.Errorf("netmap not found")
+}
+
+func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) {
+return s.currentEpoch, nil
+}

@@ -3,7 +3,6 @@ package blobstortest
import (
"context"
"errors"
-"slices"
"testing"

"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -27,7 +26,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
_, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)

-objects = slices.Delete(objects, delID, delID+1)
+objects = append(objects[:delID], objects[delID+1:]...)

runTestNormalHandler(t, s, objects)
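The two forms in this hunk are equivalent for in-range indexes; slices.Delete (Go 1.21+) simply states the intent directly. One observable difference: since Go 1.22, slices.Delete zeroes the vacated tail elements to help the garbage collector, which the append idiom does not, though for pointer-free element types the results are identical.

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []int{10, 20, 30, 40}
	b := []int{10, 20, 30, 40}
	delID := 1

	a = slices.Delete(a, delID, delID+1)       // stdlib form
	b = append(b[:delID], b[delID+1:]...)      // manual form

	fmt.Println(a) // [10 30 40]
	fmt.Println(b) // [10 30 40]
}
```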

@@ -153,10 +153,16 @@ func (e *StorageEngine) Close(ctx context.Context) error {
}

// closes all shards. Never returns an error, shard errors are logged.
-func (e *StorageEngine) close(ctx context.Context) error {
+func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
e.mtx.RLock()
defer e.mtx.RUnlock()

+if releasePools {
+for _, p := range e.shardPools {
+p.Release()
+}
+}
+
for id, sh := range e.shards {
if err := sh.Close(ctx); err != nil {
e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
@@ -207,7 +213,7 @@ func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error {
return e.open(ctx)
}
} else if prevErr == nil { // ok -> block
-return e.close(ctx)
+return e.close(ctx, errors.Is(err, errClosed))
}

// otherwise do nothing

@@ -245,6 +245,7 @@ func TestReload(t *testing.T) {
// no new paths => no new shards
require.Equal(t, shardNum, len(e.shards))
+require.Equal(t, shardNum, len(e.shardPools))

newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
@@ -256,6 +257,7 @@ func TestReload(t *testing.T) {
require.NoError(t, e.Reload(context.Background(), rcfg))

require.Equal(t, shardNum+1, len(e.shards))
+require.Equal(t, shardNum+1, len(e.shardPools))

require.NoError(t, e.Close(context.Background()))
})
@@ -275,6 +277,7 @@ func TestReload(t *testing.T) {
// removed one
require.Equal(t, shardNum-1, len(e.shards))
+require.Equal(t, shardNum-1, len(e.shardPools))

require.NoError(t, e.Close(context.Background()))
})
@@ -308,6 +311,7 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
}

require.Equal(t, num, len(e.shards))
+require.Equal(t, num, len(e.shardPools))

return e, currShards
}

@@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -28,6 +29,8 @@ type StorageEngine struct {
shards map[string]hashedShard

+shardPools map[string]util.WorkerPool
+
closeCh chan struct{}
setModeCh chan setModeRequest
wg sync.WaitGroup
@@ -190,6 +193,8 @@ type cfg struct {
metrics MetricRegister

+shardPoolSize uint32
+
lowMem bool

containerSource atomic.Pointer[containerSource]
@@ -197,8 +202,9 @@ func defaultCfg() *cfg {
func defaultCfg() *cfg {
res := &cfg{
log: logger.NewLoggerWrapper(zap.L()),
-metrics: noopMetrics{},
+shardPoolSize: 20,
+metrics: noopMetrics{},
}
res.containerSource.Store(&containerSource{})
return res
@@ -215,6 +221,7 @@ func New(opts ...Option) *StorageEngine {
return &StorageEngine{
cfg: c,
shards: make(map[string]hashedShard),
+shardPools: make(map[string]util.WorkerPool),
closeCh: make(chan struct{}),
setModeCh: make(chan setModeRequest),
evacuateLimiter: &evacuationLimiter{},
@@ -234,6 +241,13 @@ func WithMetrics(v MetricRegister) Option {
}
}

+// WithShardPoolSize returns option to specify size of worker pool for each shard.
+func WithShardPoolSize(sz uint32) Option {
+return func(c *cfg) {
+c.shardPoolSize = sz
+}
+}
+
// WithErrorThreshold returns an option to specify size amount of errors after which
// shard is moved to read-only mode.
func WithErrorThreshold(sz uint32) Option {
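A usage sketch of the functional-options pattern these hunks follow: options mutate a cfg built by defaultCfg(), so anything not passed keeps its default (pool size 20 on this branch). The engine package import path is an assumption inferred from the sibling paths in the imports above, and the package is part of the frostfs-node module.

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" // assumed path
)

func main() {
	// Options apply in order over the defaults.
	e := engine.New(
		engine.WithShardPoolSize(32),   // per-shard worker pool capacity
		engine.WithErrorThreshold(10),  // errors before read-only mode
	)
	fmt.Printf("%T\n", e) // *engine.StorageEngine
}
```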

@@ -2,11 +2,8 @@ package engine

import (
"context"
-"fmt"
"path/filepath"
-"runtime/debug"
-"strings"
-"sync"
+"sync/atomic"
"testing"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
@@ -60,6 +57,7 @@ func (te *testEngineWrapper) setShardsNumOpts(
te.shardIDs[i] = shard.ID()
}
require.Len(t, te.engine.shards, num)
+require.Len(t, te.engine.shardPools, num)
return te
}

@@ -160,74 +158,22 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
var _ qos.Limiter = (*testQoSLimiter)(nil)

type testQoSLimiter struct {
t testing.TB
-quard sync.Mutex
-id int64
-readStacks map[int64][]byte
-writeStacks map[int64][]byte
+read atomic.Int64
+write atomic.Int64
}

-func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
-
func (t *testQoSLimiter) Close() {
-t.quard.Lock()
-defer t.quard.Unlock()
-
-var sb strings.Builder
-var seqN int
-for _, stack := range t.readStacks {
-seqN++
-sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
-}
-for _, stack := range t.writeStacks {
-seqN++
-sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
-}
-require.True(t.t, seqN == 0, sb.String())
+require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0")
+require.Equal(t.t, int64(0), t.write.Load(), "write requests count after limiter close must be 0")
}

func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
-t.quard.Lock()
-defer t.quard.Unlock()
-
-stack := debug.Stack()
-
-t.id++
-id := t.id
-
-if t.readStacks == nil {
-t.readStacks = make(map[int64][]byte)
-}
-t.readStacks[id] = stack
-
-return func() {
-t.quard.Lock()
-defer t.quard.Unlock()
-
-delete(t.readStacks, id)
-}, nil
+t.read.Add(1)
+return func() { t.read.Add(-1) }, nil
}

func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
-t.quard.Lock()
-defer t.quard.Unlock()
-
-stack := debug.Stack()
-
-t.id++
-id := t.id
-
-if t.writeStacks == nil {
-t.writeStacks = make(map[int64][]byte)
-}
-t.writeStacks[id] = stack
-
-return func() {
-t.quard.Lock()
-defer t.quard.Unlock()
-
-delete(t.writeStacks, id)
-}, nil
+t.write.Add(1)
+return func() { t.write.Add(-1) }, nil
}
-
-func (t *testQoSLimiter) SetParentID(string) {}

@@ -46,6 +46,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
var testShards [2]*testShard

te := testNewEngine(t,
+WithShardPoolSize(1),
WithErrorThreshold(errThreshold),
).
setShardsNumOpts(t, 2, func(id int) []shard.Option {

@@ -15,6 +15,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -200,6 +201,11 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
return res
}

+type pooledShard struct {
+hashedShard
+pool util.WorkerPool
+}
+
var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")

// Evacuate moves data from one shard to the others.
@@ -246,7 +252,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
}

var mtx sync.RWMutex
-copyShards := func() []hashedShard {
+copyShards := func() []pooledShard {
mtx.RLock()
defer mtx.RUnlock()
t := slices.Clone(shards)
@@ -260,7 +266,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
}

func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
-shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
var err error
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
@@ -382,7 +388,7 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
}

func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
@@ -406,7 +412,7 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel
}

func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
sh := shardsToEvacuate[shardID]
@@ -479,7 +485,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
}

func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
sh := shardsToEvacuate[shardID]
shards := getShards()
@@ -509,7 +515,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string,
}

func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
-prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
trace.WithAttributes(
@@ -577,7 +583,7 @@ func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.S
}

func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
-prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) (bool, string, error) {
target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
if err != nil {
@@ -647,15 +653,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar
// findShardToEvacuateTree returns first shard according HRW or first shard with tree exists.
func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
-shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
-) (hashedShard, bool, error) {
+shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+) (pooledShard, bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
-var result hashedShard
+var result pooledShard
var found bool
for _, target := range shards {
select {
case <-ctx.Done():
-return hashedShard{}, false, ctx.Err()
+return pooledShard{}, false, ctx.Err()
default:
}

@@ -683,7 +689,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora
return result, found, nil
}

-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
+func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) {
e.mtx.RLock()
defer e.mtx.RUnlock()

@@ -713,15 +719,18 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
// We must have all shards, to have correct information about their
// indexes in a sorted slice and set appropriate marks in the metabase.
// Evacuated shard is skipped during put.
-shards := make([]hashedShard, 0, len(e.shards))
+shards := make([]pooledShard, 0, len(e.shards))
for id := range e.shards {
-shards = append(shards, e.shards[id])
+shards = append(shards, pooledShard{
+hashedShard: e.shards[id],
+pool: e.shardPools[id],
+})
}
return shards, nil
}

func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
-getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
+getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
defer span.End()
@@ -791,7 +800,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
}

func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
-shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
+shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
) (bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
for j := range shards {
@@ -804,7 +813,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
-switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status {
+switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status {
case putToShardSuccess:
res.objEvacuated.Add(1)
e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
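Both evacuation paths above rely on rendezvous (HRW) hashing: sorting the shard slice by a hash of the object or tree key gives every caller the same deterministic candidate order, so a stable target shard is picked without coordination. A sketch using the same hrw package, assuming its Hasher contract is a single Hash() uint64 method (an assumption; only SortHasherSliceByValue and StringHash are visible in the diff):

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/hrw"
)

// node stands in for hashedShard: anything exposing a stable hash.
type node struct {
	id   string
	hash uint64
}

func (n node) Hash() uint64 { return n.hash }

func main() {
	shards := []node{
		{"shard-a", hrw.StringHash("shard-a")},
		{"shard-b", hrw.StringHash("shard-b")},
		{"shard-c", hrw.StringHash("shard-c")},
	}

	// Same key => same order on every node and every call.
	hrw.SortHasherSliceByValue(shards, hrw.StringHash("object-address"))
	for _, s := range shards {
		fmt.Println(s.id)
	}
}
```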

@@ -196,6 +196,7 @@ func TestEvacuateShardObjects(t *testing.T) {
e.mtx.Lock()
delete(e.shards, evacuateShardID)
+delete(e.shardPools, evacuateShardID)
e.mtx.Unlock()

checkHasObjects(t)
@@ -404,8 +405,8 @@ func TestEvacuateSingleProcess(t *testing.T) {
require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

-blocker := make(chan any)
-running := make(chan any)
+blocker := make(chan interface{})
+running := make(chan interface{})

var prm EvacuateShardPrm
prm.ShardID = ids[1:2]
@@ -446,8 +447,8 @@ func TestEvacuateObjectsAsync(t *testing.T) {
require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

-blocker := make(chan any)
-running := make(chan any)
+blocker := make(chan interface{})
+running := make(chan interface{})

var prm EvacuateShardPrm
prm.ShardID = ids[1:2]

@@ -205,7 +205,7 @@ func BenchmarkInhumeMultipart(b *testing.B) {
func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
b.StopTimer()

-engine := testNewEngine(b).
+engine := testNewEngine(b, WithShardPoolSize(uint32(numObjects))).
setShardsNum(b, numShards).prepare(b).engine
defer func() { require.NoError(b, engine.Close(context.Background())) }()

@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -98,13 +99,13 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
var shRes putToShardRes
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
e.mtx.RLock()
-_, ok := e.shards[sh.ID().String()]
+pool, ok := e.shardPools[sh.ID().String()]
e.mtx.RUnlock()
if !ok {
// Shard was concurrently removed, skip.
return false
}
-shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer)
+shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer)
return shRes.status != putToShardUnknown
})
switch shRes.status {
@@ -121,59 +122,70 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// putToShard puts object to sh.
// Return putToShardStatus and error if it is necessary to propagate an error upper.
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard,
+func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool,
addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
) (res putToShardRes) {
-var existPrm shard.ExistsPrm
-existPrm.Address = addr
-
-exists, err := sh.Exists(ctx, existPrm)
-if err != nil {
-if shard.IsErrObjectExpired(err) {
-// object is already found but
-// expired => do nothing with it
-res.status = putToShardExists
-} else {
-e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
-zap.Stringer("shard_id", sh.ID()),
-zap.Error(err))
-}
-return // this is not ErrAlreadyRemoved error so we can go to the next shard
-}
-if exists.Exists() {
-res.status = putToShardExists
-return
-}
-var putPrm shard.PutPrm
-putPrm.SetObject(obj)
-putPrm.SetIndexAttributes(isIndexedContainer)
-_, err = sh.Put(ctx, putPrm)
-if err != nil {
-if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
-errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
-e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
-zap.Stringer("shard_id", sh.ID()),
-zap.Error(err))
-return
-}
-if client.IsErrObjectAlreadyRemoved(err) {
-e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
-zap.Stringer("shard_id", sh.ID()),
-zap.Error(err))
-res.status = putToShardRemoved
-res.err = err
-return
-}
-e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr))
-return
-}
-res.status = putToShardSuccess
+exitCh := make(chan struct{})
+
+if err := pool.Submit(func() {
+defer close(exitCh)
+
+var existPrm shard.ExistsPrm
+existPrm.Address = addr
+
+exists, err := sh.Exists(ctx, existPrm)
+if err != nil {
+if shard.IsErrObjectExpired(err) {
+// object is already found but
+// expired => do nothing with it
+res.status = putToShardExists
+} else {
+e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
+zap.Stringer("shard_id", sh.ID()),
+zap.Error(err))
+}
+return // this is not ErrAlreadyRemoved error so we can go to the next shard
+}
+if exists.Exists() {
+res.status = putToShardExists
+return
+}
+var putPrm shard.PutPrm
+putPrm.SetObject(obj)
+putPrm.SetIndexAttributes(isIndexedContainer)
+_, err = sh.Put(ctx, putPrm)
+if err != nil {
+if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
+errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
+e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
+zap.Stringer("shard_id", sh.ID()),
+zap.Error(err))
+return
+}
+if client.IsErrObjectAlreadyRemoved(err) {
+e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
+zap.Stringer("shard_id", sh.ID()),
+zap.Error(err))
+res.status = putToShardRemoved
+res.err = err
+return
+}
+e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr))
+return
+}
+res.status = putToShardSuccess
+}); err != nil {
+e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err))
+close(exitCh)
+}
+
+<-exitCh
return
}
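The feature-branch side of this hunk uses a submit-and-wait idiom: the work runs on a bounded worker pool, the caller blocks on a channel closed by the task, and a failed Submit (the pool is non-blocking, so it fails fast under saturation) closes the channel itself so the caller never hangs. A self-contained sketch of the same pattern with the ants library used above:

```go
package main

import (
	"fmt"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Non-blocking pool: Submit returns an error instead of queueing
	// when all workers are busy, mirroring the addShard wiring.
	pool, err := ants.NewPool(4, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	exitCh := make(chan struct{})
	if err := pool.Submit(func() {
		defer close(exitCh)
		fmt.Println("doing the shard put")
	}); err != nil {
		// Overloaded: report and unblock the waiter ourselves.
		fmt.Println("submit failed:", err)
		close(exitCh)
	}
	<-exitCh // the caller stays synchronous despite the pool hop
}
```

The channel close happens in exactly one place per path, which is what keeps the wait safe whether the task ran or the submission was rejected.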

@@ -17,6 +17,7 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/google/uuid"
+"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)

@@ -180,6 +181,11 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
e.mtx.Lock()
defer e.mtx.Unlock()

+pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
+if err != nil {
+return fmt.Errorf("create pool: %w", err)
+}
+
strID := sh.ID().String()
if _, ok := e.shards[strID]; ok {
return fmt.Errorf("shard with id %s was already added", strID)
@@ -193,6 +199,8 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
hash: hrw.StringHash(strID),
}

+e.shardPools[strID] = pool
+
return nil
}

@@ -217,6 +225,12 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) {
ss = append(ss, sh)
delete(e.shards, id)

+pool, ok := e.shardPools[id]
+if ok {
+pool.Release()
+delete(e.shardPools, id)
+}
+
e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}

@@ -318,6 +332,8 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
// HandleNewEpoch notifies every shard about NewEpoch event.
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
+ev := shard.EventNewEpoch(epoch)
+
e.mtx.RLock()
defer e.mtx.RUnlock()

@@ -325,7 +341,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
select {
case <-ctx.Done():
return
-case sh.NotificationChannel() <- epoch:
+case sh.NotificationChannel() <- ev:
default:
e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))
@@ -413,6 +429,12 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha
delete(e.shards, idStr)

+pool, ok := e.shardPools[idStr]
+if ok {
+pool.Release()
+delete(e.shardPools, idStr)
+}
+
e.log.Info(ctx, logs.EngineShardHasBeenRemoved,
zap.String("id", idStr))
}

@@ -17,6 +17,7 @@ func TestRemoveShard(t *testing.T) {
e, ids := te.engine, te.shardIDs
defer func() { require.NoError(t, e.Close(context.Background())) }()

+require.Equal(t, numOfShards, len(e.shardPools))
require.Equal(t, numOfShards, len(e.shards))

removedNum := numOfShards / 2
@@ -36,6 +37,7 @@ func TestRemoveShard(t *testing.T) {
}
}

+require.Equal(t, numOfShards-removedNum, len(e.shardPools))
require.Equal(t, numOfShards-removedNum, len(e.shards))

for id, removed := range mSh {

@@ -230,7 +230,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
}

// TreeSortedByFilename implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) {
+func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
@@ -241,7 +241,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
var err error
var nodes []pilorama.MultiNodeInfo
-var cursor *pilorama.Cursor
+var cursor *string
for _, sh := range e.sortShards(cid) {
nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
if err != nil {

@@ -1,82 +0,0 @@
package meta

import (
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "go.etcd.io/bbolt"
)

type bucketCache struct {
    locked    *bbolt.Bucket
    graveyard *bbolt.Bucket
    garbage   *bbolt.Bucket
    expired   map[cid.ID]*bbolt.Bucket
    primary   map[cid.ID]*bbolt.Bucket
}

func newBucketCache() *bucketCache {
    return &bucketCache{}
}

func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
    if bc == nil {
        return tx.Bucket(bucketNameLocked)
    }
    return getBucket(&bc.locked, tx, bucketNameLocked)
}

func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
    if bc == nil {
        return tx.Bucket(graveyardBucketName)
    }
    return getBucket(&bc.graveyard, tx, graveyardBucketName)
}

func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
    if bc == nil {
        return tx.Bucket(garbageBucketName)
    }
    return getBucket(&bc.garbage, tx, garbageBucketName)
}

func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket {
    if *cache != nil {
        return *cache
    }
    *cache = tx.Bucket(name)
    return *cache
}

func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
    if bc == nil {
        bucketName := make([]byte, bucketKeySize)
        bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
        return tx.Bucket(bucketName)
    }
    return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
}

func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
    if bc == nil {
        bucketName := make([]byte, bucketKeySize)
        bucketName = primaryBucketName(cnr, bucketName)
        return tx.Bucket(bucketName)
    }
    return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
}

func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
    value, ok := (*m)[cnr]
    if ok {
        return value
    }
    if *m == nil {
        *m = make(map[cid.ID]*bbolt.Bucket, 1)
    }
    bucketName := make([]byte, bucketKeySize)
    bucketName = nameFunc(cnr, bucketName)
    (*m)[cnr] = getBucket(&value, tx, bucketName)
    return value
}
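
Note: the bucketCache above (present only on master) memoizes bbolt bucket handles so that hot paths resolve each bucket once per read transaction instead of once per object. A minimal usage sketch, assuming a *bbolt.DB handle db and an addrs slice — both hypothetical names:

// Sketch: amortizing bucket lookups across one read transaction.
_ = db.View(func(tx *bbolt.Tx) error {
    bc := newBucketCache()
    for range addrs {
        // The first call hits tx.Bucket; later calls return the cached handle.
        _ = getGraveyardBucket(bc, tx)
        _ = getGarbageBucket(bc, tx)
    }
    return nil
})

Passing a nil *bucketCache falls back to a plain tx.Bucket lookup, so single-object callers pay no extra cost.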

View file

@ -153,16 +153,12 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
    return objectStatusWithCache(nil, tx, addr, currEpoch)
}
func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
    // locked object could not be removed/marked with GC/expired
    if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) {
    if objectLocked(tx, addr.Container(), addr.Object()) {
        return 0, nil
    }
    expired, err := isExpiredWithCache(bc, tx, addr, currEpoch)
    expired, err := isExpired(tx, addr, currEpoch)
    if err != nil {
        return 0, err
    }
@ -171,8 +167,8 @@ func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, curr
        return 3, nil
    }
    graveyardBkt := getGraveyardBucket(bc, tx)
    graveyardBkt := tx.Bucket(graveyardBucketName)
    garbageBkt := getGarbageBucket(bc, tx)
    garbageBkt := tx.Bucket(garbageBucketName)
    addrKey := addressKey(addr, make([]byte, addressKeySize))
    return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
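
Note: objectStatus folds the lock, expiration, and graveyard checks into a single status code. Only codes 2 and 3 survive in the visible comment, so the meanings of 0 and 1 below are an assumption (0 - available, 1 - marked with GC). A sketch of how a caller would branch on the result:

st, err := objectStatus(tx, addr, currEpoch)
if err != nil {
    return err
}
switch st {
case 0: // available (locked objects also report 0)
case 1: // assumed: marked as garbage
case 2: // covered with tombstone
case 3: // expired in currEpoch
}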

View file

@ -74,11 +74,9 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
}
func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
    return isExpiredWithCache(nil, tx, addr, currEpoch)
}
func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
    b := getExpiredBucket(bc, tx, addr.Container())
    bucketName := make([]byte, bucketKeySize)
    bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName)
    b := tx.Bucket(bucketName)
    if b == nil {
        return false, nil
    }

View file

@ -88,12 +88,8 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
    return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
}
func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
    if checkStatus {
        st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
        st, err := objectStatus(tx, addr, currEpoch)
        if err != nil {
            return nil, err
        }
@ -113,13 +109,12 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
    bucketName := make([]byte, bucketKeySize)
    // check in primary index
    if b := getPrimaryBucket(bc, tx, cnr); b != nil {
        if data := b.Get(key); len(data) != 0 {
            return obj, obj.Unmarshal(data)
        }
    }
    data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
    if len(data) != 0 {
        return obj, obj.Unmarshal(data)
    }
    data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
    data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
    if len(data) != 0 {
        return nil, getECInfoError(tx, cnr, data)
    }
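
Note: the master side reads the primary index through the cached bucket and calls b.Get directly. bbolt's documented contract is that a slice returned by Get is only valid for the lifetime of the transaction, which is why the object is unmarshalled (copied) right away. A sketch of that fast path under those assumptions:

if b := getPrimaryBucket(bc, tx, cnr); b != nil {
    if data := b.Get(key); len(data) != 0 {
        obj := objectSDK.New()
        // Unmarshal copies the bytes out of bbolt's mmap'ed page,
        // so obj remains valid after the transaction closes.
        return obj, obj.Unmarshal(data)
    }
}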

View file

@ -139,7 +139,8 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
    var containerID cid.ID
    var offset []byte
    bc := newBucketCache()
    graveyardBkt := tx.Bucket(graveyardBucketName)
    garbageBkt := tx.Bucket(garbageBucketName)
    rawAddr := make([]byte, cidSize, addressKeySize)
@ -168,7 +169,7 @@ loop:
        bkt := tx.Bucket(name)
        if bkt != nil {
            copy(rawAddr, cidRaw)
            result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
            result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
                result, count, cursor, threshold, currEpoch)
            if err != nil {
                return nil, nil, err
@ -203,10 +204,9 @@ loop:
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
func selectNFromBucket(
    bc *bucketCache,
    bkt *bbolt.Bucket, // main bucket
func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
    objType objectSDK.Type, // type of the objects stored in the main bucket
    graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
    cidRaw []byte, // container ID prefix, optimization
    cnt cid.ID, // container ID
    to []objectcore.Info, // listing result
@ -219,6 +219,7 @@ func selectNFromBucket(
        cursor = new(Cursor)
    }
    count := len(to)
    c := bkt.Cursor()
    k, v := c.First()
@ -230,7 +231,7 @@
    }
    for ; k != nil; k, v = c.Next() {
        if len(to) >= limit {
        if count >= limit {
            break
        }
@ -240,8 +241,6 @@
        }
        offset = k
        graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
        garbageBkt := getGarbageBucket(bc, bkt.Tx())
        if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
            continue
        }
@ -252,7 +251,7 @@
        }
        expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
        if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
        if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch {
            continue
        }
@ -274,6 +273,7 @@
        a.SetContainer(cnt)
        a.SetObject(obj)
        to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
        count++
    }
    return to, offset, cursor, nil
View file

@ -59,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
    for range b.N {
        res, err := db.ListWithCursor(context.Background(), prm)
        if err != nil {
            if !errors.Is(err, meta.ErrEndOfListing) {
            if errors.Is(err, meta.ErrEndOfListing) {
                b.Fatalf("error: %v", err)
            }
            prm.SetCursor(nil)
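
Note: the two sides disagree on the termination check. The master side fails only on real errors (!errors.Is(err, meta.ErrEndOfListing)) and resets the cursor once the listing is exhausted; the right-hand variant inverts the condition and would call b.Fatalf exactly when listing ends normally. A typical drain loop looks like this sketch (res.Cursor() is an assumed accessor, named after the surrounding test code):

for {
    res, err := db.ListWithCursor(context.Background(), prm)
    if err != nil {
        if errors.Is(err, meta.ErrEndOfListing) {
            break // every object has been listed
        }
        return err
    }
    // ... consume the returned batch ...
    prm.SetCursor(res.Cursor()) // resume after the last returned object
}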

View file

@ -4,7 +4,6 @@ import (
    "bytes"
    "context"
    "fmt"
    "slices"
    "time"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
@ -163,11 +162,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
// checks if specified object is locked in the specified container.
func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
    return objectLockedWithCache(nil, tx, idCnr, idObj)
}
func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool {
    bucketLocked := getLockedBucket(bc, tx)
    bucketLocked := tx.Bucket(bucketNameLocked)
    if bucketLocked != nil {
        key := make([]byte, cidSize)
        idCnr.Encode(key)
@ -255,7 +250,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
                unlockedObjects = append(unlockedObjects, addr)
            } else {
                // exclude locker
                keyLockers = slices.Delete(keyLockers, i, i+1)
                keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
                v, err = encodeList(keyLockers)
                if err != nil {
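
Note: master swaps the classic append-based deletion for slices.Delete (Go 1.21+). Both remove keyLockers[i] in place; since Go 1.22, slices.Delete additionally zeroes the vacated tail slots, which lets the GC reclaim pointer elements sooner. Equivalence sketch:

// Dropping element i from a slice, in place:
keyLockers = append(keyLockers[:i], keyLockers[i+1:]...) // pre-generics idiom
keyLockers = slices.Delete(keyLockers, i, i+1)           // same result, clearer intent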

View file

@ -37,7 +37,7 @@ func TestResetDropsContainerBuckets(t *testing.T) {
    for idx := range 100 {
        var putPrm PutPrm
        putPrm.SetObject(testutil.GenerateObject())
        putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
        putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
        _, err := db.Put(context.Background(), putPrm)
        require.NoError(t, err)
    }
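
Note: master builds the storage ID with fmt.Appendf (Go 1.19+), which formats directly into a byte slice and skips the intermediate string that []byte(fmt.Sprintf(...)) allocates:

id := fmt.Appendf(nil, "0/%d", 42)     // []byte("0/42"), no string copy
old := []byte(fmt.Sprintf("0/%d", 42)) // same bytes, one extra allocation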

View file

@ -131,7 +131,6 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
    res := make([]oid.Address, 0, len(mAddr))
    bc := newBucketCache()
    for a, ind := range mAddr {
        if ind != expLen {
            continue // ignore objects with unmatched fast filters
@ -146,7 +145,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
        var addr oid.Address
        addr.SetContainer(cnr)
        addr.SetObject(id)
        st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
        st, err := objectStatus(tx, addr, currEpoch)
        if err != nil {
            return nil, err
        }
@ -154,7 +153,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
            continue // ignore removed objects
        }
        addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch)
        addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
        if !match {
            continue // ignore objects with unmatched slow filters
        }
@ -452,13 +451,13 @@ func (db *DB) selectObjectID(
}
// matchSlowFilters return true if object header is matched by all slow filters.
func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
    result := addr
    if len(f) == 0 {
        return result, true
    }
    obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
    obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch)
    if err != nil {
        return result, false
    }
@ -516,9 +515,9 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
    return result, true
}
func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
    buf := make([]byte, addressKeySize)
    obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
    obj, err := db.get(tx, addr, buf, true, false, currEpoch)
    if err != nil {
        var ecInfoError *objectSDK.ECInfoError
        if errors.As(err, &ecInfoError) {
@ -528,7 +527,7 @@ func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Ad
            continue
        }
        addr.SetObject(objID)
        obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
        obj, err = db.get(tx, addr, buf, true, false, currEpoch)
        if err == nil {
            return obj, true, nil
        }

View file

@ -1216,8 +1216,6 @@ func TestExpiredObjects(t *testing.T) {
}
func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) {
    b.ReportAllocs()
    var prm meta.SelectPrm
    prm.SetContainerID(cid)
    prm.SetFilters(fs)

View file

@ -1077,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol
}
// TreeSortedByFilename implements the Forest interface.
func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) {
    var (
        startedAt = time.Now()
        success   = false
@ -1155,7 +1155,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
    }
    if len(res) != 0 {
        s := string(findAttr(res[len(res)-1].Meta, AttributeFilename))
        last = NewCursor(s, res[len(res)-1].LastChild())
        last = &s
    }
    return res, last, metaerr.Wrap(err)
}
@ -1166,10 +1166,10 @@ func sortByFilename(nodes []NodeInfo) {
    })
}
func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo {
func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
    var lastBytes []byte
    if last != nil {
        lastBytes = []byte(last.GetFilename())
        lastBytes = []byte(*last)
    }
    sortByFilename(result)
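
Note: master threads a *Cursor through TreeSortedByFilename instead of a bare *string, so pagination can resume not just after a filename but after a specific child when several nodes share one filename. The Cursor definition is not part of this diff; judging from the NewCursor, GetFilename, and LastChild calls, a plausible shape is the following sketch (an assumption, not the real type):

// Assumed shape, inferred from the call sites shown in this diff.
type Cursor struct {
    filename string // filename of the last returned node
    index    uint64 // last child ID, disambiguates equal filenames
}

func NewCursor(filename string, index uint64) *Cursor {
    return &Cursor{filename: filename, index: index}
}

func (c *Cursor) GetFilename() string {
    if c == nil {
        return ""
    }
    return c.filename
}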

View file

@ -164,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
}
// TreeSortedByFilename implements the Forest interface.
func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) {
func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) {
    fullID := cid.String() + "/" + treeID
    s, ok := f.treeMap[fullID]
    if !ok {
@ -204,14 +204,17 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
    r := mergeNodeInfos(res)
    for i := range r {
        if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() {
        if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start {
            finish := min(len(res), i+count)
            finish := i + count
            if len(res) < finish {
                finish = len(res)
            }
            last := string(findAttr(r[finish-1].Meta, AttributeFilename))
            return r[i:finish], NewCursor(last, 0), nil
            return r[i:finish], &last, nil
        }
    }
    last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
    return nil, NewCursor(last, 0), nil
    return nil, &last, nil
}
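
Note: master also collapses the manual bound check into the min builtin (Go 1.21+); the behavior is identical:

// Right-hand (older) form:
finish := i + count
if len(res) < finish {
    finish = len(res)
}
// Left-hand (master) form, same result:
finish = min(len(res), i+count)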
// TreeGetChildren implements the Forest interface. // TreeGetChildren implements the Forest interface.

View file

@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
    }
    var result []MultiNodeInfo
    treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
    treeAppend := func(t *testing.T, last *string, count int) *string {
        res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
        require.NoError(t, err)
        result = append(result, res...)
@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
    }
    var result []MultiNodeInfo
    treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor {
    treeAppend := func(t *testing.T, last *string, count int) *string {
        res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
        require.NoError(t, err)
        result = append(result, res...)

Some files were not shown because too many files have changed in this diff.