forked from TrueCloudLab/frostfs-node
Compare commits: remove-dan ... master (93 commits)
Commit SHA1s:
163e2e9f83 0c664fa804 0a9d139e20 3bb1fb744a ccdd6cb767 73e35bc885 eed0824590 a4da1da767 30099194ba e7e91ef634
4919b6a206 d951289131 016f2e11e3 9aa486c9d8 af76350bfb 3fa5c22ddf 5385f9994f eea46a599d 049a650b89 3f4717a37f
60cea8c714 7df2912a83 affab25512 45b7796151 e8801dbf49 eb9df85b98 21bed3362c af5b3575d0 a49f0717b3 a7ac30da9c
39f549a7ab 760b6a44ea a11b2d27e4 a405fb1f39 a7319bc979 fc743cc537 54ef71a92f 91c7b39232 ef6ac751df fde2649e60
07a660fbc4 7893d763d1 ff4e9b6ae1 997759994a ecb6b0793c 460e5cbccf 155d3ddb6e 40536d8a06 d66bffb191 bcc84c85a0
737788b35f 2005fdda09 597bce7a87 4ed2bbdb0f 3727d60331 d36afa31c7 8643e0abc5 bd61f7bf0a df6d9da82a aab8addae0
9e31cb249f 6260d703ce a17c3356fa 471aeeaff3 4c8f9580a1 bf8914fedc 5ba0e2918e 4685afb1dc eb8b9b2b3b 6c6e463b73
401d96a89e 8ed71a969e c2d855aedd 2162f8e189 b9360be1dc ceff5e1f6a e0dc3c3d0c 92a67a6716 98d6125029 0991077cb3
c660271039 92ab58984b dae0949f6e 5590886599 f0b2017057 dce269c62e a97bded440 9a0507704a 2ff032db90 37972a91c1
003d568ae2 b2adf1109e 02f3a7f65c
213 changed files with 4499 additions and 3901 deletions
.ci/Jenkinsfile (new file, vendored, 83 lines)
@@ -0,0 +1,83 @@
+def golang = ['1.23', '1.24']
+def golangDefault = "golang:${golang.last()}"
+
+async {
+
+    for (version in golang) {
+        def go = version
+
+        task("test/go${go}") {
+            container("golang:${go}") {
+                sh 'make test'
+            }
+        }
+
+        task("build/go${go}") {
+            container("golang:${go}") {
+                for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
+                    sh """
+                        make bin/frostfs-${app}
+                        bin/frostfs-${app} --version
+                    """
+                }
+            }
+        }
+    }
+
+    task('test/race') {
+        container(golangDefault) {
+            sh 'make test GOFLAGS="-count=1 -race"'
+        }
+    }
+
+    task('lint') {
+        container(golangDefault) {
+            sh 'make lint-install lint'
+        }
+    }
+
+    task('staticcheck') {
+        container(golangDefault) {
+            sh 'make staticcheck-install staticcheck-run'
+        }
+    }
+
+    task('gopls') {
+        container(golangDefault) {
+            sh 'make gopls-install gopls-run'
+        }
+    }
+
+    task('gofumpt') {
+        container(golangDefault) {
+            sh '''
+                make fumpt-install
+                make fumpt
+                git diff --exit-code --quiet
+            '''
+        }
+    }
+
+    task('vulncheck') {
+        container(golangDefault) {
+            sh '''
+                go install golang.org/x/vuln/cmd/govulncheck@latest
+                govulncheck ./...
+            '''
+        }
+    }
+
+    task('pre-commit') {
+        dockerfile("""
+            FROM ${golangDefault}
+            RUN apt update && \
+                apt install -y --no-install-recommends pre-commit
+            """) {
+            withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
+                sh 'pre-commit run --color=always --hook-stage=manual --all-files'
+            }
+        }
+    }
+}
+
+// TODO: dco check
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.23', '1.24' ]

     steps:
       - uses: actions/checkout@v3

@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.22'
+         go-version: '1.24'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

@@ -21,7 +21,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-         go-version: 1.23
+         go-version: 1.24
      - name: Set up Python
        run: |
          apt update

@@ -16,7 +16,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.23'
+         go-version: '1.24'
          cache: true

      - name: Install linters

@@ -30,7 +30,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-       go_versions: [ '1.22', '1.23' ]
+       go_versions: [ '1.23', '1.24' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

@@ -53,7 +53,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.22'
+         go-version: '1.24'
          cache: true

      - name: Run tests

@@ -68,7 +68,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.23'
+         go-version: '1.24'
          cache: true

      - name: Install staticcheck

@@ -104,7 +104,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.23'
+         go-version: '1.24'
          cache: true

      - name: Install gofumpt

@@ -18,7 +18,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.23'
+         go-version: '1.24'
          check-latest: true

      - name: Install govulncheck
@@ -22,6 +22,11 @@ linters-settings:
     # 'default' case is present, even if all enum members aren't listed in the
     # switch
     default-signifies-exhaustive: true
+  gci:
+    sections:
+      - standard
+      - default
+    custom-order: true
   govet:
     # report about shadowed variables
     check-shadowing: false

@@ -72,6 +77,7 @@ linters:
   - durationcheck
   - exhaustive
+  - copyloopvar
   - gci
   - gofmt
   - goimports
   - misspell
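The newly enabled copyloopvar linter flags loop-variable copies that Go 1.22 made unnecessary, since each `for` iteration now gets a fresh variable. A minimal, self-contained Go sketch of the pattern it reports:

package main

import "fmt"

func main() {
	funcs := make([]func(), 0, 3)
	for _, v := range []int{1, 2, 3} {
		v := v // pre-1.22 idiom to avoid capturing a shared variable; copyloopvar reports this copy as redundant
		funcs = append(funcs, func() { fmt.Println(v) })
	}
	for _, f := range funcs {
		f() // prints 1, 2, 3 either way under Go >= 1.22
	}
}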
Makefile (42 changed lines)
@@ -1,5 +1,6 @@
 #!/usr/bin/make -f
 SHELL = bash
+.SHELLFLAGS = -euo pipefail -c

 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")

@@ -7,7 +8,7 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

-GO_VERSION ?= 1.22
+GO_VERSION ?= 1.23
 LINT_VERSION ?= 1.62.2
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
 PROTOC_VERSION ?= 25.0

@@ -16,7 +17,7 @@ PROTOC_OS_VERSION=osx-x86_64
 ifeq ($(shell uname), Linux)
   PROTOC_OS_VERSION=linux-x86_64
 endif
-STATICCHECK_VERSION ?= 2024.1.1
+STATICCHECK_VERSION ?= 2025.1.1
 ARCH = amd64

 BIN = bin

@@ -42,7 +43,7 @@ GOFUMPT_VERSION ?= v0.7.0
 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
 GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)

-GOPLS_VERSION ?= v0.15.1
+GOPLS_VERSION ?= v0.17.1
 GOPLS_DIR ?= $(abspath $(BIN))/gopls
 GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
 GOPLS_TEMP_FILE := $(shell mktemp)

@@ -115,7 +116,7 @@ protoc:
 # Install protoc
 protoc-install:
 	@rm -rf $(PROTOBUF_DIR)
-	@mkdir $(PROTOBUF_DIR)
+	@mkdir -p $(PROTOBUF_DIR)
 	@echo "⇒ Installing protoc... "
 	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
 	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)

@@ -169,7 +170,7 @@ imports:
 # Install gofumpt
 fumpt-install:
 	@rm -rf $(GOFUMPT_DIR)
-	@mkdir $(GOFUMPT_DIR)
+	@mkdir -p $(GOFUMPT_DIR)
 	@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)

 # Run gofumpt

@@ -186,14 +187,37 @@ test:
 	@echo "⇒ Running go test"
 	@GOFLAGS="$(GOFLAGS)" go test ./...

+# Install Gerrit commit-msg hook
+review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
+review-install:
+	@git config remote.review.url \
+		|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
+	@mkdir -p $(GIT_HOOK_DIR)/
+	@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
+	@chmod +x $(GIT_HOOK_DIR)/commit-msg
+	@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
+	@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
+
+# Create a PR in Gerrit
+review: BRANCH ?= master
+review:
+	@git push review HEAD:refs/for/$(BRANCH) \
+		--push-option r=e.stratonikov@yadro.com \
+		--push-option r=d.stepanov@yadro.com \
+		--push-option r=an.nikiforov@yadro.com \
+		--push-option r=a.arifullin@yadro.com \
+		--push-option r=ekaterina.lebedeva@yadro.com \
+		--push-option r=a.savchuk@yadro.com \
+		--push-option r=a.chuprov@yadro.com
+
 # Run pre-commit
 pre-commit-run:
 	@pre-commit run -a --hook-stage manual

 # Install linters
-lint-install:
+lint-install: $(BIN)
 	@rm -rf $(OUTPUT_LINT_DIR)
-	@mkdir $(OUTPUT_LINT_DIR)
+	@mkdir -p $(OUTPUT_LINT_DIR)
 	@mkdir -p $(TMP_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters

@@ -212,7 +236,7 @@ lint:
 # Install staticcheck
 staticcheck-install:
 	@rm -rf $(STATICCHECK_DIR)
-	@mkdir $(STATICCHECK_DIR)
+	@mkdir -p $(STATICCHECK_DIR)
 	@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)

 # Run staticcheck

@@ -225,7 +249,7 @@ staticcheck-run:
 # Install gopls
 gopls-install:
 	@rm -rf $(GOPLS_DIR)
-	@mkdir $(GOPLS_DIR)
+	@mkdir -p $(GOPLS_DIR)
 	@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)

 # Run gopls
@@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
 			nbuf := make([]byte, 8)
 			copy(nbuf[:], v)
 			n := binary.LittleEndian.Uint64(nbuf)
-			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
+			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
 		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
 			if len(v) == 0 || len(v) > 1 {
 				return helper.InvalidConfigValueErr(k)
 			}
-			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
+			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
 		default:
-			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
+			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
 		}
 	}
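Several hunks in this commit range repeat one pattern: `[]byte(fmt.Sprintf(...))` becomes `fmt.Appendf(nil, ...)`. `fmt.Appendf` (Go 1.19+) formats straight into a byte slice, skipping the intermediate string that `Sprintf` allocates. A standalone sketch of the equivalence; the key names and values below are illustrative, not taken from the netmap config:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	tw := tabwriter.NewWriter(os.Stdout, 0, 2, 2, ' ', 0)

	// Before: Sprintf builds a string, which is then converted (copied) to []byte.
	_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", "EpochDuration", 240)))

	// After: Appendf formats directly into a freshly allocated []byte.
	_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", "MaxObjectSize", 67108864))

	_ = tw.Flush()
}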
@@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
 		if info.version == "" {
 			info.version = "unknown"
 		}
-		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
-			info.name, info.version, info.hash.StringLE())))
+		_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
+			info.name, info.version, info.hash.StringLE()))
 	}
 	_ = tw.Flush()
@@ -34,7 +34,7 @@ const (
 	subjectNameFlag    = "subject-name"
 	subjectKeyFlag     = "subject-key"
 	subjectAddressFlag = "subject-address"
-	includeNamesFlag   = "include-names"
+	extendedFlag       = "extended"
 	groupNameFlag      = "group-name"
 	groupIDFlag        = "group-id"
@@ -209,7 +209,7 @@ func initFrostfsIDListSubjectsCmd() {
 	Cmd.AddCommand(frostfsidListSubjectsCmd)
 	frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
-	frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
+	frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
 }

 func initFrostfsIDCreateGroupCmd() {
@@ -256,7 +256,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
 	frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
-	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
+	frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
 }

 func initFrostfsIDSetKVCmd() {
@@ -336,7 +336,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
 }

 func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
-	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
+	extended, _ := cmd.Flags().GetBool(extendedFlag)
 	ns := getFrostfsIDNamespace(cmd)
 	inv, _, hash := initInvoker(cmd)
 	reader := frostfsidrpclient.NewReader(inv, hash)
@@ -349,21 +349,19 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })

 	for _, addr := range subAddresses {
-		if !includeNames {
+		if !extended {
 			cmd.Println(address.Uint160ToString(addr))
 			continue
 		}

-		sessionID, it, err := reader.ListSubjects()
+		items, err := reader.GetSubject(addr)
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)

-		items, err := readIterator(inv, &it, sessionID)
-		commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
-
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)

-		cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
+		printSubjectInfo(cmd, addr, subj)
+		cmd.Println()
 	}
 }
@@ -483,7 +481,7 @@ func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
 func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	ns := getFrostfsIDNamespace(cmd)
 	groupID := getFrostfsIDGroupID(cmd)
-	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
+	extended, _ := cmd.Flags().GetBool(extendedFlag)
 	inv, cs, hash := initInvoker(cmd)
 	_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
 	commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@@ -501,7 +499,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })

 	for _, subjAddr := range subjects {
-		if !includeNames {
+		if !extended {
 			cmd.Println(address.Uint160ToString(subjAddr))
 			continue
 		}
@@ -510,7 +508,8 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
-		cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
+		printSubjectInfo(cmd, subjAddr, subj)
+		cmd.Println()
 	}
 }
@@ -600,3 +599,30 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui

 	return inv, cs, nmHash
 }
+
+func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
+	cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
+	pk := "<nil>"
+	if subj.PrimaryKey != nil {
+		pk = subj.PrimaryKey.String()
+	}
+	cmd.Printf("Primary key: %s\n", pk)
+	cmd.Printf("Name: %s\n", subj.Name)
+	cmd.Printf("Namespace: %s\n", subj.Namespace)
+	if len(subj.AdditionalKeys) > 0 {
+		cmd.Printf("Additional keys:\n")
+		for _, key := range subj.AdditionalKeys {
+			k := "<nil>"
+			if key != nil {
+				k = key.String()
+			}
+			cmd.Printf("- %s\n", k)
+		}
+	}
+	if len(subj.KV) > 0 {
+		cmd.Printf("KV:\n")
+		for k, v := range subj.KV {
+			cmd.Printf("- %s: %s\n", k, v)
+		}
+	}
+}
@@ -3,6 +3,7 @@ package helper
 import (
 	"errors"
 	"fmt"
+	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -118,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
 		return err
 	}
 	for k, v := range m {
-		for _, key := range NetmapConfigKeys {
-			if k == key {
-				md[k] = v
-				break
-			}
-		}
+		if slices.Contains(NetmapConfigKeys, k) {
+			md[k] = v
+		}
 	}
 	return nil
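The nested loop-and-break is collapsed into `slices.Contains`, available in the standard library since Go 1.21. The same filtering idiom in isolation (the key names are made up):

package main

import (
	"fmt"
	"slices"
)

func main() {
	allowedKeys := []string{"MaxObjectSize", "EpochDuration"} // hypothetical stand-in for NetmapConfigKeys
	src := map[string]any{"EpochDuration": 240, "Unrelated": "x"}
	dst := map[string]any{}

	for k, v := range src {
		if slices.Contains(allowedKeys, k) { // replaces the inner for/if/break
			dst[k] = v
		}
	}
	fmt.Println(dst) // map[EpochDuration:240]
}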
@@ -22,15 +22,14 @@ import (
 )

 const (
-	gasInitialTotalSupply = 30000000 * native.GASFactor
 	// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
 	initialAlphabetGASAmount = 10_000 * native.GASFactor
 	// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
 	initialProxyGASAmount = 50_000 * native.GASFactor
 )

-func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
-	return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
+func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
+	return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
 }

 func transferFunds(c *helper.InitializeContext) error {
@@ -42,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error {
 		return err
 	}

+	version, err := c.Client.GetVersion()
+	if err != nil {
+		return err
+	}
+
 	var transfers []transferTarget
 	for _, acc := range c.Accounts {
 		to := acc.Contract.ScriptHash()
@@ -59,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error {
 		transferTarget{
 			Token:   gas.Hash,
 			Address: c.CommitteeAcc.Contract.ScriptHash(),
-			Amount:  initialCommitteeGASAmount(c),
+			Amount:  initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
 		},
 		transferTarget{
 			Token: neo.Hash,
@@ -83,16 +87,23 @@ func transferFunds(c *helper.InitializeContext) error {
 // transferFundsFinished checks balances of accounts we transfer GAS to.
 // The stage is considered finished if the balance is greater than the half of what we need to transfer.
 func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
-	acc := c.Accounts[0]
-
 	r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
-	res, err := r.BalanceOf(acc.Contract.ScriptHash())
-	if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
+	res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
+	if err != nil {
 		return false, err
 	}

+	version, err := c.Client.GetVersion()
+	if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
+		return false, err
+	}
+
 	res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
-	return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
+	if err != nil {
+		return false, err
+	}
+
+	return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
 }

 func transferGASToProxy(c *helper.InitializeContext) error {
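The committee share is no longer derived from a hardcoded 30,000,000 GAS total; it is now computed from the `InitialGasDistribution` value the RPC node reports via `getversion`. A worked sketch of the arithmetic, with an illustrative alphabet size (GAS uses 8 decimals, so the factor is 1e8):

package main

import "fmt"

const gasFactor = 100_000_000 // 1 GAS = 1e8 fractions, matching native.GASFactor

func main() {
	initialGasDistribution := int64(30_000_000) * gasFactor // in reality read from c.Client.GetVersion()
	alphabetNodes := int64(7)                               // illustrative wallet count
	perAlphabetNode := int64(10_000) * gasFactor

	committee := (initialGasDistribution - perAlphabetNode*alphabetNodes) / 2
	fmt.Printf("committee share: %d.%08d GAS\n", committee/gasFactor, committee%gasFactor)
	// with these numbers: 14965000.00000000 GAS
}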
@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
 	buf := bytes.NewBuffer(nil)
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

-	_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
-	_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
-	_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
+	_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
+	_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
+	_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))

 	_ = tw.Flush()
 	cmd.Print(buf.String())
@@ -7,7 +7,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
 	utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"

@@ -41,7 +40,6 @@ func init() {

 	rootCmd.AddCommand(config.RootCmd)
 	rootCmd.AddCommand(morph.RootCmd)
-	rootCmd.AddCommand(storagecfg.RootCmd)
 	rootCmd.AddCommand(metabase.RootCmd)

 	rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
Deleted file (137 lines):
@@ -1,137 +0,0 @@
-package storagecfg
-
-const configTemplate = `logger:
-  level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
-
-node:
-  wallet:
-    path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
-    address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
-    password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
-  addresses: # list of addresses announced by Storage node in the Network map
-    - {{ .AnnouncedAddress }}
-  attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
-  relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
-
-grpc:
-  num: 1 # total number of listener endpoints
-  0:
-    endpoint: {{ .Endpoint }} # endpoint for gRPC server
-    tls:{{if .TLSCert}}
-      enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
-      certificate: {{ .TLSCert }} # path to TLS certificate
-      key: {{ .TLSKey }} # path to TLS key
-    {{- else }}
-      enabled: false # disable TLS for a gRPC connection
-    {{- end}}
-
-control:
-  authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
-    {{- range .AuthorizedKeys }}
-    - {{.}}{{end}}
-  grpc:
-    endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
-
-morph:
-  dial_timeout: 20s # timeout for side chain NEO RPC client connection
-  cache_ttl: 15s # use TTL cache for side chain GET operations
-  rpc_endpoint: # side chain N3 RPC endpoints
-    {{- range .MorphRPC }}
-    - address: wss://{{.}}/ws{{end}}
-{{if not .Relay }}
-storage:
-  shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
-
-  shard:
-    default: # section with the default shard parameters
-      metabase:
-        perm: 0644 # permissions for metabase files(directories: +x for current user and group)
-
-      blobstor:
-        perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
-        depth: 2 # max depth of object tree storage in FS
-        small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
-        compress: true # turn on/off Zstandard compression (level 3) of stored objects
-        compression_exclude_content_types:
-          - audio/*
-          - video/*
-
-        blobovnicza:
-          size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
-          depth: 1 # max depth of object tree storage in key-value DB
-          width: 4 # max width of object tree storage in key-value DB
-          opened_cache_capacity: 50 # maximum number of opened database files
-          opened_cache_ttl: 5m # ttl for opened database file
-          opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
-
-      gc:
-        remover_batch_size: 200 # number of objects to be removed by the garbage collector
-        remover_sleep_interval: 5m # frequency of the garbage collector invocation
-    0:
-      mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
-
-      metabase:
-        path: {{ .MetabasePath }} # path to the metabase
-
-      blobstor:
-        path: {{ .BlobstorPath }} # path to the blobstor
-{{end}}`
-
-const (
-	neofsMainnetAddress   = "2cafa46838e8b564468ebd868dcafdd99dce6221"
-	balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
-	neofsTestnetAddress   = "b65d8243ac63983206d17e5221af0653a7266fa1"
-	balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
-)
-
-var n3config = map[string]struct {
-	MorphRPC        []string
-	RPC             []string
-	NeoFSContract   string
-	BalanceContract string
-}{
-	"testnet": {
-		MorphRPC: []string{
-			"rpc01.morph.testnet.fs.neo.org:51331",
-			"rpc02.morph.testnet.fs.neo.org:51331",
-			"rpc03.morph.testnet.fs.neo.org:51331",
-			"rpc04.morph.testnet.fs.neo.org:51331",
-			"rpc05.morph.testnet.fs.neo.org:51331",
-			"rpc06.morph.testnet.fs.neo.org:51331",
-			"rpc07.morph.testnet.fs.neo.org:51331",
-		},
-		RPC: []string{
-			"rpc01.testnet.n3.nspcc.ru:21331",
-			"rpc02.testnet.n3.nspcc.ru:21331",
-			"rpc03.testnet.n3.nspcc.ru:21331",
-			"rpc04.testnet.n3.nspcc.ru:21331",
-			"rpc05.testnet.n3.nspcc.ru:21331",
-			"rpc06.testnet.n3.nspcc.ru:21331",
-			"rpc07.testnet.n3.nspcc.ru:21331",
-		},
-		NeoFSContract:   neofsTestnetAddress,
-		BalanceContract: balanceTestnetAddress,
-	},
-	"mainnet": {
-		MorphRPC: []string{
-			"rpc1.morph.fs.neo.org:40341",
-			"rpc2.morph.fs.neo.org:40341",
-			"rpc3.morph.fs.neo.org:40341",
-			"rpc4.morph.fs.neo.org:40341",
-			"rpc5.morph.fs.neo.org:40341",
-			"rpc6.morph.fs.neo.org:40341",
-			"rpc7.morph.fs.neo.org:40341",
-		},
-		RPC: []string{
-			"rpc1.n3.nspcc.ru:10331",
-			"rpc2.n3.nspcc.ru:10331",
-			"rpc3.n3.nspcc.ru:10331",
-			"rpc4.n3.nspcc.ru:10331",
-			"rpc5.n3.nspcc.ru:10331",
-			"rpc6.n3.nspcc.ru:10331",
-			"rpc7.n3.nspcc.ru:10331",
-		},
-		NeoFSContract:   neofsMainnetAddress,
-		BalanceContract: balanceMainnetAddress,
-	},
-}
Deleted file (433 lines):
@@ -1,433 +0,0 @@
-package storagecfg
-
-import (
-	"bytes"
-	"context"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"math/rand"
-	"net"
-	"net/url"
-	"os"
-	"path/filepath"
-	"slices"
-	"strconv"
-	"strings"
-	"text/template"
-	"time"
-
-	netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
-	"github.com/chzyer/readline"
-	"github.com/nspcc-dev/neo-go/cli/flags"
-	"github.com/nspcc-dev/neo-go/cli/input"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
-	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
-
-	"github.com/spf13/cobra"
-)
-
-const (
-	walletFlag  = "wallet"
-	accountFlag = "account"
-)
-
-const (
-	defaultControlEndpoint = "localhost:8090"
-	defaultDataEndpoint    = "localhost"
-)
-
-// RootCmd is a root command of config section.
-var RootCmd = &cobra.Command{
-	Use:   "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
-	Short: "Section for storage node configuration commands",
-	Run:   storageConfig,
-}
-
-func init() {
-	fs := RootCmd.Flags()
-
-	fs.StringP(walletFlag, "w", "", "Path to wallet")
-	fs.StringP(accountFlag, "a", "", "Wallet account")
-}
-
-type config struct {
-	AnnouncedAddress string
-	AuthorizedKeys   []string
-	ControlEndpoint  string
-	Endpoint         string
-	TLSCert          string
-	TLSKey           string
-	MorphRPC         []string
-	Attribute        struct {
-		Locode string
-	}
-	Wallet struct {
-		Path     string
-		Account  string
-		Password string
-	}
-	Relay        bool
-	BlobstorPath string
-	MetabasePath string
-}
-
-func storageConfig(cmd *cobra.Command, args []string) {
-	outPath := getOutputPath(args)
-
-	historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
-	readline.SetHistoryPath(historyPath)
-
-	var c config
-
-	c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
-	if c.Wallet.Path == "" {
-		c.Wallet.Path = getPath("Path to the storage node wallet: ")
-	}
-
-	w, err := wallet.NewWalletFromFile(c.Wallet.Path)
-	fatalOnErr(err)
-
-	fillWalletAccount(cmd, &c, w)
-
-	accH, err := flags.ParseAddress(c.Wallet.Account)
-	fatalOnErr(err)
-
-	acc := w.GetAccount(accH)
-	if acc == nil {
-		fatalOnErr(errors.New("can't find account in wallet"))
-	}
-
-	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
-	fatalOnErr(err)
-
-	err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
-	fatalOnErr(err)
-
-	c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
-
-	network := readNetwork(cmd)
-
-	c.MorphRPC = n3config[network].MorphRPC
-
-	depositGas(cmd, acc, network)
-
-	c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
-
-	endpoint := getDefaultEndpoint(cmd, &c)
-	c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
-	if c.Endpoint == "" {
-		c.Endpoint = endpoint
-	}
-
-	c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
-	if c.ControlEndpoint == "" {
-		c.ControlEndpoint = defaultControlEndpoint
-	}
-
-	c.TLSCert = getPath("TLS Certificate (optional): ")
-	if c.TLSCert != "" {
-		c.TLSKey = getPath("TLS Key: ")
-	}
-
-	c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
-	if !c.Relay {
-		p := getPath("Path to the storage directory (all available storage will be used): ")
-		c.BlobstorPath = filepath.Join(p, "blob")
-		c.MetabasePath = filepath.Join(p, "meta")
-	}
-
-	out := applyTemplate(c)
-	fatalOnErr(os.WriteFile(outPath, out, 0o644))
-
-	cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
-}
-
-func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
-	var addr, port string
-	for {
-		c.AnnouncedAddress = getString("Publicly announced address: ")
-		validator := netutil.Address{}
-		err := validator.FromString(c.AnnouncedAddress)
-		if err != nil {
-			cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
-			continue
-		}
-		uriAddr, err := url.Parse(validator.URIAddr())
-		if err != nil {
-			panic(fmt.Errorf("unexpected error: %w", err))
-		}
-		addr = uriAddr.Hostname()
-		port = uriAddr.Port()
-		ip, err := net.ResolveIPAddr("ip", addr)
-		if err != nil {
-			cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
-			continue
-		}
-
-		if !ip.IP.IsGlobalUnicast() {
-			cmd.Println("IP must be global unicast.")
-			continue
-		}
-		cmd.Printf("Resolved IP address: %s\n", ip.String())
-
-		_, err = strconv.ParseUint(port, 10, 16)
-		if err != nil {
-			cmd.Println("Port must be an integer.")
-			continue
-		}
-
-		break
-	}
-	return net.JoinHostPort(defaultDataEndpoint, port)
-}
-
-func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
-	c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
-	if c.Wallet.Account == "" {
-		addr := address.Uint160ToString(w.GetChangeAddress())
-		c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
-		if c.Wallet.Account == "" {
-			c.Wallet.Account = addr
-		}
-	}
-}
-
-func readNetwork(cmd *cobra.Command) string {
-	var network string
-	for {
-		network = getString("Choose network [mainnet]/testnet: ")
-		switch network {
-		case "":
-			network = "mainnet"
-		case "testnet", "mainnet":
-		default:
-			cmd.Println(`Network must be either "mainnet" or "testnet"`)
-			continue
-		}
-		break
-	}
-	return network
-}
-
-func getOutputPath(args []string) string {
-	if len(args) != 0 {
-		return args[0]
-	}
-	outPath := getPath("File to write config at [./config.yml]: ")
-	if outPath == "" {
-		outPath = "./config.yml"
-	}
-	return outPath
-}
-
-func getWalletAccount(w *wallet.Wallet, prompt string) string {
-	addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
-	for i := range w.Accounts {
-		addrs[i] = readline.PcItem(w.Accounts[i].Address)
-	}
-
-	readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
-	defer readline.SetAutoComplete(nil)
-
-	s, err := readline.Line(prompt)
-	fatalOnErr(err)
-	return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
-}
-
-func getString(prompt string) string {
-	s, err := readline.Line(prompt)
-	fatalOnErr(err)
-	if s != "" {
-		_ = readline.AddHistory(s)
-	}
-	return s
-}
-
-type filenameCompleter struct{}
-
-func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
-	prefix := string(line[:pos])
-	dir := filepath.Dir(prefix)
-	de, err := os.ReadDir(dir)
-	if err != nil {
-		return nil, 0
-	}
-
-	for i := range de {
-		name := filepath.Join(dir, de[i].Name())
-		if strings.HasPrefix(name, prefix) {
-			tail := []rune(strings.TrimPrefix(name, prefix))
-			if de[i].IsDir() {
-				tail = append(tail, filepath.Separator)
-			}
-			newLine = append(newLine, tail)
-		}
-	}
-	if pos != 0 {
-		return newLine, pos - len([]rune(dir))
-	}
-	return newLine, 0
-}
-
-func getPath(prompt string) string {
-	readline.SetAutoComplete(filenameCompleter{})
-	defer readline.SetAutoComplete(nil)
-
-	p, err := readline.Line(prompt)
-	fatalOnErr(err)
-
-	if p == "" {
-		return p
-	}
-
-	_ = readline.AddHistory(p)
-
-	abs, err := filepath.Abs(p)
-	if err != nil {
-		fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
-	}
-
-	return abs
-}
-
-func getConfirmation(def bool, prompt string) bool {
-	for {
-		s, err := readline.Line(prompt)
-		fatalOnErr(err)
-
-		switch strings.ToLower(s) {
-		case "y", "yes":
-			return true
-		case "n", "no":
-			return false
-		default:
-			if len(s) == 0 {
-				return def
-			}
-		}
-	}
-}
-
-func applyTemplate(c config) []byte {
-	tmpl, err := template.New("config").Parse(configTemplate)
-	fatalOnErr(err)
-
-	b := bytes.NewBuffer(nil)
-	fatalOnErr(tmpl.Execute(b, c))
-
-	return b.Bytes()
-}
-
-func fatalOnErr(err error) {
-	if err != nil {
-		_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
-		os.Exit(1)
-	}
-}
-
-func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
-	sideClient := initClient(n3config[network].MorphRPC)
-	balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
-
-	sideActor, err := actor.NewSimple(sideClient, acc)
-	if err != nil {
-		fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
-	}
-
-	sideGas := nep17.NewReader(sideActor, balanceHash)
-	accSH := acc.Contract.ScriptHash()
-
-	balance, err := sideGas.BalanceOf(accSH)
-	if err != nil {
-		fatalOnErr(fmt.Errorf("side chain balance: %w", err))
-	}
-
-	ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
-		fixedn.ToString(balance, 12)))
-	if !ok {
-		return
-	}
-
-	amountStr := getString("Enter amount in GAS: ")
-	amount, err := fixedn.FromString(amountStr, 8)
-	if err != nil {
-		fatalOnErr(fmt.Errorf("invalid amount: %w", err))
-	}
-
-	mainClient := initClient(n3config[network].RPC)
-	neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
-
-	mainActor, err := actor.NewSimple(mainClient, acc)
-	if err != nil {
-		fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
-	}
-
-	mainGas := nep17.New(mainActor, gas.Hash)
-
-	txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
-	if err != nil {
-		fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
-	}
-
-	cmd.Print("Waiting for transactions to persist.")
-	tick := time.NewTicker(time.Second / 2)
-	defer tick.Stop()
-
-	timer := time.NewTimer(time.Second * 20)
-	defer timer.Stop()
-
-	at := trigger.Application
-
-loop:
-	for {
-		select {
-		case <-tick.C:
-			_, err := mainClient.GetApplicationLog(txHash, &at)
-			if err == nil {
-				cmd.Print("\n")
-				break loop
-			}
-			cmd.Print(".")
-		case <-timer.C:
-			cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
-			if getConfirmation(false, "Continue configuration? yes/[no]: ") {
-				return
-			}
-			os.Exit(1)
-		}
-	}
-}
-
-func initClient(rpc []string) *rpcclient.Client {
-	var c *rpcclient.Client
-	var err error
-
-	shuffled := slices.Clone(rpc)
-	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
-
-	for _, endpoint := range shuffled {
-		c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
-			DialTimeout:    time.Second * 2,
-			RequestTimeout: time.Second * 5,
-		})
-		if err != nil {
-			continue
-		}
-		if err = c.Init(); err != nil {
-			continue
-		}
-		return c
-	}
-
-	fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
-	panic("unreachable")
-}
@@ -9,7 +9,6 @@ import (
 	"io"
 	"os"
 	"slices"
-	"strings"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"

@@ -77,9 +76,7 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
 // SortedIDList returns sorted list of identifiers of user's containers.
 func (x ListContainersRes) SortedIDList() []cid.ID {
 	list := x.cliRes.Containers()
-	slices.SortFunc(list, func(lhs, rhs cid.ID) int {
-		return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
-	})
+	slices.SortFunc(list, cid.ID.Cmp)
 	return list
 }

@@ -687,9 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
 		return nil, fmt.Errorf("read object list: %w", err)
 	}

-	slices.SortFunc(list, func(a, b oid.ID) int {
-		return strings.Compare(a.EncodeToString(), b.EncodeToString())
-	})
+	slices.SortFunc(list, oid.ID.Cmp)

 	return &SearchObjectsRes{
 		ids: list,
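Both sort sites now pass a method expression instead of a closure: `cid.ID.Cmp` has type `func(cid.ID, cid.ID) int`, which is exactly the comparison function `slices.SortFunc` expects, and it avoids encoding every ID to a string per comparison. The same idiom on a stand-in type:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// ID stands in for cid.ID / oid.ID; only the Cmp method matters here.
type ID struct{ s string }

// Cmp is the three-way comparison slices.SortFunc needs.
func (a ID) Cmp(b ID) int { return strings.Compare(a.s, b.s) }

func main() {
	list := []ID{{"c"}, {"a"}, {"b"}}
	slices.SortFunc(list, ID.Cmp) // method expression: func(ID, ID) int
	fmt.Println(list)             // [{a} {b} {c}]
}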
@@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
 	prmDial := client.PrmDial{
 		Endpoint: addr.URIAddr(),
 		GRPCDialOptions: []grpc.DialOption{
-			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
+			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
 			grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
 			grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
 		},
@@ -28,7 +28,7 @@ const (
 	RPC          = "rpc-endpoint"
 	RPCShorthand = "r"
 	RPCDefault   = ""
-	RPCUsage     = "Remote node address (as 'multiaddr' or '<host>:<port>')"
+	RPCUsage     = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"

 	Timeout          = "timeout"
 	TimeoutShorthand = "t"
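The reworded usage string names the two endpoint forms the flag accepts. Illustratively — the endpoint values are made up and the SDK import path is an assumption, though the `client.PrmDial` struct and its `Endpoint` field appear in the hunk above:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" // assumed SDK client package path
)

func main() {
	plain := client.PrmDial{Endpoint: "s01.frostfs.devenv:8080"}          // plain host:port
	secure := client.PrmDial{Endpoint: "grpcs://s01.frostfs.devenv:8082"} // grpcs:// scheme selects TLS

	fmt.Println(plain.Endpoint, secure.Endpoint)
}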
@@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
 	_, _ = tw.Write([]byte("#\tName\tType\n"))
 	for i, t := range targets {
-		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
+		_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
 	}
 	_ = tw.Flush()
 	cmd.Print(buf.String())
cmd/frostfs-cli/modules/control/locate.go (new file, 117 lines)
@@ -0,0 +1,117 @@
+package control
+
+import (
+	"bytes"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+	object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+	rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/mr-tron/base58"
+	"github.com/spf13/cobra"
+)
+
+const (
+	FullInfoFlag      = "full"
+	FullInfoFlagUsage = "Print full ShardInfo."
+)
+
+var locateObjectCmd = &cobra.Command{
+	Use:   "locate-object",
+	Short: "List shards storing the object",
+	Long:  "List shards storing the object",
+	Run:   locateObject,
+}
+
+func initControlLocateObjectCmd() {
+	initControlFlags(locateObjectCmd)
+
+	flags := locateObjectCmd.Flags()
+
+	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
+	_ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
+
+	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
+	_ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
+
+	flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
+	flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
+}
+
+func locateObject(cmd *cobra.Command, _ []string) {
+	var cnr cid.ID
+	var obj oid.ID
+
+	_ = object.ReadObjectAddress(cmd, &cnr, &obj)
+
+	pk := key.Get(cmd)
+
+	body := new(control.ListShardsForObjectRequest_Body)
+	body.SetContainerId(cnr.EncodeToString())
+	body.SetObjectId(obj.EncodeToString())
+	req := new(control.ListShardsForObjectRequest)
+	req.SetBody(body)
+	signRequest(cmd, pk, req)
+
+	cli := getClient(cmd, pk)
+
+	var err error
+	var resp *control.ListShardsForObjectResponse
+	err = cli.ExecRaw(func(client *rawclient.Client) error {
+		resp, err = control.ListShardsForObject(client, req)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+	shardIDs := resp.GetBody().GetShard_ID()
+
+	isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
+	if !isFull {
+		for _, id := range shardIDs {
+			cmd.Println(base58.Encode(id))
+		}
+		return
+	}
+
+	// get full shard info
+	listShardsReq := new(control.ListShardsRequest)
+	listShardsReq.SetBody(new(control.ListShardsRequest_Body))
+	signRequest(cmd, pk, listShardsReq)
+	var listShardsResp *control.ListShardsResponse
+	err = cli.ExecRaw(func(client *rawclient.Client) error {
+		listShardsResp, err = control.ListShards(client, listShardsReq)
+		return err
+	})
+	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+	verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
+
+	shards := listShardsResp.GetBody().GetShards()
+	sortShardsByID(shards)
+	shards = filterShards(shards, shardIDs)
+
+	isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
+	if isJSON {
+		prettyPrintShardsJSON(cmd, shards)
+	} else {
+		prettyPrintShards(cmd, shards)
+	}
+}
+
+func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
+	var res []control.ShardInfo
+	for _, id := range ids {
+		for _, inf := range info {
+			if bytes.Equal(inf.Shard_ID, id) {
+				res = append(res, inf)
+			}
+		}
+	}
+	return res
+}
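Without `--full`, `locate-object` prints each shard ID base58-encoded. Tooling that wants the raw bytes back can decode with the same mr-tron/base58 package; the sample ID below is made up:

package main

import (
	"fmt"

	"github.com/mr-tron/base58"
)

func main() {
	raw := []byte{0x01, 0x02, 0x03, 0x04} // illustrative shard ID bytes
	s := base58.Encode(raw)               // what the command prints per shard

	back, err := base58.Decode(s) // recover raw ID bytes for further control calls
	if err != nil {
		panic(err)
	}
	fmt.Println(s, back)
}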
@@ -39,6 +39,7 @@ func init() {
 		listRulesCmd,
 		getRuleCmd,
 		listTargetsCmd,
+		locateObjectCmd,
 	)

 	initControlHealthCheckCmd()

@@ -52,4 +53,5 @@ func init() {
 	initControlListRulesCmd()
 	initControGetRuleCmd()
 	initControlListTargetsCmd()
+	initControlLocateObjectCmd()
 }
@@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
 var sealWritecacheShardCmd = &cobra.Command{
 	Use:   "seal",
 	Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
-	Long:  "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
+	Long:  "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
 	Run:   sealWritecache,
 }
@@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
 			commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
 		}

-		objAddr = readObjectAddress(cmd, &cnr, &obj)
+		objAddr = ReadObjectAddress(cmd, &cnr, &obj)
 	}

 	pk := key.GetOrGenerate(cmd)

@@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := readObjectAddress(cmd, &cnr, &obj)
+	objAddr := ReadObjectAddress(cmd, &cnr, &obj)

 	filename := cmd.Flag(fileFlag).Value.String()
 	out, closer := createOutWriter(cmd, filename)

@@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := readObjectAddress(cmd, &cnr, &obj)
+	objAddr := ReadObjectAddress(cmd, &cnr, &obj)

 	ranges, err := getRangeList(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)

@@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := readObjectAddress(cmd, &cnr, &obj)
+	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
 	pk := key.GetOrGenerate(cmd)

 	cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)

@@ -101,7 +101,7 @@ func initObjectNodesCmd() {
 func objectNodes(cmd *cobra.Command, _ []string) {
 	var cnrID cid.ID
 	var objID oid.ID
-	readObjectAddress(cmd, &cnrID, &objID)
+	ReadObjectAddress(cmd, &cnrID, &objID)

 	pk := key.GetOrGenerate(cmd)
 	cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)

@@ -56,7 +56,7 @@ func patch(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := readObjectAddress(cmd, &cnr, &obj)
+	objAddr := ReadObjectAddress(cmd, &cnr, &obj)

 	ranges, err := getRangeSlice(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)

@@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := readObjectAddress(cmd, &cnr, &obj)
+	objAddr := ReadObjectAddress(cmd, &cnr, &obj)

 	ranges, err := getRangeList(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)

@@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
 	return xs
 }

-func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
+func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
 	readCID(cmd, cnr)
 	readOID(cmd, obj)
@@ -33,12 +33,13 @@ func _client() (tree.TreeServiceClient, error) {

 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
-			tracing.NewUnaryClientInteceptor(),
+			tracing.NewUnaryClientInterceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
 			tracing.NewStreamClientInterceptor(),
 		),
 		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
+		grpc.WithDisableServiceConfig(),
 	}

 	if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
@@ -9,6 +9,7 @@ import (
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/spf13/viper"
 	"go.uber.org/zap"
 )

@@ -38,13 +39,14 @@ func reloadConfig() error {
 	}
 	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
 	audit.Store(cfg.GetBool("audit.enabled"))
+	var logPrm logger.Prm
 	err = logPrm.SetLevelString(cfg.GetString("logger.level"))
 	if err != nil {
 		return err
 	}
 	logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+	log.Reload(logPrm)

-	return logPrm.Reload()
+	return nil
 }

 func watchForSignal(ctx context.Context, cancel func()) {
@@ -31,7 +31,6 @@ const (
 var (
 	wg     = new(sync.WaitGroup)
 	intErr = make(chan error) // internal inner ring errors
-	logPrm = new(logger.Prm)
 	innerRing  *innerring.Server
 	pprofCmp   *pprofComponent
 	metricsCmp *httpComponent

@@ -70,6 +69,7 @@ func main() {

 	metrics := irMetrics.NewInnerRingMetrics()

+	var logPrm logger.Prm
 	err = logPrm.SetLevelString(
 		cfg.GetString("logger.level"),
 	)
@@ -2,13 +2,17 @@ package meta

 import (
 	"context"
+	"encoding/binary"
+	"errors"
 	"fmt"

 	common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+	schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
 	"github.com/rivo/tview"
 	"github.com/spf13/cobra"
+	"go.etcd.io/bbolt"
 )

 var tuiCMD = &cobra.Command{

@@ -27,6 +31,11 @@ Available search filters:

 var initialPrompt string

+var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
+	2: schema.MetabaseParserV2,
+	3: schema.MetabaseParserV3,
+}
+
 func init() {
 	common.AddComponentPathFlag(tuiCMD, &vPath)

@@ -49,12 +58,22 @@ func runTUI(cmd *cobra.Command) error {
 	}
 	defer db.Close()

+	schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
+	if !hasVersion {
+		return errors.New("couldn't detect schema version")
+	}
+
+	metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
+	if !ok {
+		return fmt.Errorf("unknown schema version %d", schemaVersion)
+	}
+
 	// Need if app was stopped with Ctrl-C.
 	ctx, cancel := context.WithCancel(cmd.Context())
 	defer cancel()

 	app := tview.NewApplication()
-	ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
+	ui := tui.NewUI(ctx, app, db, metabaseParser, nil)

 	_ = ui.AddFilter("cid", tui.CIDParser, "CID")
 	_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@@ -69,3 +88,31 @@ func runTUI(cmd *cobra.Command) error {
 	app.SetRoot(ui, true).SetFocus(ui)
 	return app.Run()
 }
+
+var (
+	shardInfoBucket = []byte{5}
+	versionRecord   = []byte("version")
+)
+
+func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
+	err := db.View(func(tx *bbolt.Tx) error {
+		bkt := tx.Bucket(shardInfoBucket)
+		if bkt == nil {
+			return nil
+		}
+		rec := bkt.Get(versionRecord)
+		if rec == nil {
+			return nil
+		}
+
+		version = binary.LittleEndian.Uint64(rec)
+		ok = true
+
+		return nil
+	})
+	if err != nil {
+		common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
+	}
+
+	return
+}
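The lookup reads bucket `{5}` (shard info), record key `"version"`, as a little-endian uint64. A hedged sketch of writing such a record, e.g. to build a test fixture — the database path is hypothetical:

package main

import (
	"encoding/binary"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("/tmp/meta-fixture.db", 0o600, nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bbolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte{5}) // shard info bucket
		if err != nil {
			return err
		}
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, 3) // schema version 3, little-endian
		return bkt.Put([]byte("version"), buf)
	})
	if err != nil {
		log.Fatal(err)
	}
}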
@@ -80,10 +80,15 @@ var (
 		},
 	)

-	UserAttributeParser = NewUserAttributeKeyBucketParser(
+	UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
 		NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
 	)

+	UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
+		NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
+		[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
+	)
+
 	PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
 		cidResolver: StrictResolver,
 		oidResolver: StrictResolver,

@@ -108,4 +113,14 @@ var (
 		cidResolver: StrictResolver,
 		oidResolver: LenientResolver,
 	})
+
+	ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
+		cidResolver: LenientResolver,
+		oidResolver: LenientResolver,
+	})
+
+	ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
+		cidResolver: StrictResolver,
+		oidResolver: LenientResolver,
+	})
 )
@@ -22,27 +22,31 @@ const (
 	Split
 	ContainerCounters
 	ECInfo
+	ExpirationEpochToObject
+	ObjectToExpirationEpoch
 )

 var x = map[Prefix]string{
-	Graveyard:         "Graveyard",
-	Garbage:           "Garbage",
-	ToMoveIt:          "To Move It",
-	ContainerVolume:   "Container Volume",
-	Locked:            "Locked",
-	ShardInfo:         "Shard Info",
-	Primary:           "Primary",
-	Lockers:           "Lockers",
-	Tombstone:         "Tombstone",
-	Small:             "Small",
-	Root:              "Root",
-	Owner:             "Owner",
-	UserAttribute:     "User Attribute",
-	PayloadHash:       "Payload Hash",
-	Parent:            "Parent",
-	Split:             "Split",
-	ContainerCounters: "Container Counters",
-	ECInfo:            "EC Info",
+	Graveyard:               "Graveyard",
+	Garbage:                 "Garbage",
+	ToMoveIt:                "To Move It",
+	ContainerVolume:         "Container Volume",
+	Locked:                  "Locked",
+	ShardInfo:               "Shard Info",
+	Primary:                 "Primary",
+	Lockers:                 "Lockers",
+	Tombstone:               "Tombstone",
+	Small:                   "Small",
+	Root:                    "Root",
+	Owner:                   "Owner",
+	UserAttribute:           "User Attribute",
+	PayloadHash:             "Payload Hash",
+	Parent:                  "Parent",
+	Split:                   "Split",
+	ContainerCounters:       "Container Counters",
+	ECInfo:                  "EC Info",
+	ExpirationEpochToObject: "Exp. Epoch to Object",
+	ObjectToExpirationEpoch: "Object to Exp. Epoch",
 }

 func (p Prefix) String() string {
@@ -9,7 +9,7 @@ import (

 func (b *PrefixBucket) String() string {
 	return common.FormatSimple(
-		fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+		fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
 	)
 }

@@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
 	return fmt.Sprintf(
 		"%s CID %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(b.id.String(), tcell.ColorAqua),
 	)
@@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
 func (b *UserAttributeKeyBucket) String() string {
 	return fmt.Sprintf("%s CID %s ATTR-KEY %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(
 			fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
@@ -2,6 +2,7 @@ package buckets

 import (
 	"errors"
+	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -57,10 +58,11 @@ var (
 )

 var (
-	ErrNotBucket          = errors.New("not a bucket")
-	ErrInvalidKeyLength   = errors.New("invalid key length")
-	ErrInvalidValueLength = errors.New("invalid value length")
-	ErrInvalidPrefix      = errors.New("invalid prefix")
+	ErrNotBucket              = errors.New("not a bucket")
+	ErrInvalidKeyLength       = errors.New("invalid key length")
+	ErrInvalidValueLength     = errors.New("invalid value length")
+	ErrInvalidPrefix          = errors.New("invalid prefix")
+	ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
 )

 func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@@ -132,6 +134,10 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa

 }

 func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
+	return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
+}
+
+func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
 	return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
 		if value != nil {
 			return nil, nil, ErrNotBucket
@@ -147,6 +153,11 @@ func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
 			return nil, nil, err
 		}
 		b.key = string(key[33:])
+
+		if len(keys) != 0 && !slices.Contains(keys, b.key) {
+			return nil, nil, ErrUnexpectedAttributeKey
+		}
+
 		return &b, next, nil
 	}
 }
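Editorial note: the `key[33:]` slice above relies on the user-attribute bucket key layout of 1 prefix byte + a 32-byte container ID (33 header bytes) followed by the attribute name, so the `keys` allow-list filters on the decoded attribute name alone. Passing `nil` keys, as the `NewUserAttributeKeyBucketParser` wrapper does, keeps the old accept-everything behaviour; the V3 parser restricts it to "FilePath" and "S3-Access-Box-CRDT-Name", presumably the only attribute keys the V3 schema still indexes this way.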
@@ -5,7 +5,30 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
 )

-var MetabaseParser = common.WithFallback(
+var MetabaseParserV3 = common.WithFallback(
+	common.Any(
+		buckets.GraveyardParser,
+		buckets.GarbageParser,
+		buckets.ContainerVolumeParser,
+		buckets.LockedParser,
+		buckets.ShardInfoParser,
+		buckets.PrimaryParser,
+		buckets.LockersParser,
+		buckets.TombstoneParser,
+		buckets.SmallParser,
+		buckets.RootParser,
+		buckets.UserAttributeParserV3,
+		buckets.ParentParser,
+		buckets.SplitParser,
+		buckets.ContainerCountersParser,
+		buckets.ECInfoParser,
+		buckets.ExpirationEpochToObjectParser,
+		buckets.ObjectToExpirationEpochParser,
+	),
+	common.RawParser.ToFallbackParser(),
+)
+
+var MetabaseParserV2 = common.WithFallback(
 	common.Any(
 		buckets.GraveyardParser,
 		buckets.GarbageParser,
@@ -18,7 +41,7 @@ var MetabaseParser = common.WithFallback(
 		buckets.SmallParser,
 		buckets.RootParser,
 		buckets.OwnerParser,
-		buckets.UserAttributeParser,
+		buckets.UserAttributeParserV2,
 		buckets.PayloadHashParser,
 		buckets.ParentParser,
 		buckets.SplitParser,
@@ -63,3 +63,11 @@ func (r *ContainerCountersRecord) DetailedString() string {
 func (r *ECInfoRecord) DetailedString() string {
 	return spew.Sdump(*r)
 }
+
+func (r *ExpirationEpochToObjectRecord) DetailedString() string {
+	return spew.Sdump(*r)
+}
+
+func (r *ObjectToExpirationEpochRecord) DetailedString() string {
+	return spew.Sdump(*r)
+}
@@ -143,3 +143,26 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
 		return common.No
 	}
 }
+
+func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
+	switch typ {
+	case "cid":
+		id := val.(cid.ID)
+		return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
+	case "oid":
+		id := val.(oid.ID)
+		return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
+	default:
+		return common.No
+	}
+}
+
+func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
+	switch typ {
+	case "oid":
+		id := val.(oid.ID)
+		return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
+	default:
+		return common.No
+	}
+}
@@ -249,3 +249,45 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
 	}
 	return &r, nil, nil
 }
+
+func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+	if len(key) != 72 {
+		return nil, nil, ErrInvalidKeyLength
+	}
+
+	var (
+		r   ExpirationEpochToObjectRecord
+		err error
+	)
+
+	r.epoch = binary.BigEndian.Uint64(key[:8])
+	if err = r.cnt.Decode(key[8:40]); err != nil {
+		return nil, nil, err
+	}
+	if err = r.obj.Decode(key[40:]); err != nil {
+		return nil, nil, err
+	}
+
+	return &r, nil, nil
+}
+
+func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+	if len(key) != 32 {
+		return nil, nil, ErrInvalidKeyLength
+	}
+	if len(value) != 8 {
+		return nil, nil, ErrInvalidValueLength
+	}
+
+	var (
+		r   ObjectToExpirationEpochRecord
+		err error
+	)
+
+	if err = r.obj.Decode(key); err != nil {
+		return nil, nil, err
+	}
+	r.epoch = binary.LittleEndian.Uint64(value)
+
+	return &r, nil, nil
+}
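Editorial note: the two length checks above encode the key layouts. An ExpirationEpochToObject key is 8 (big-endian epoch) + 32 (container ID) + 32 (object ID) = 72 bytes, which is why `key[:8]`, `key[8:40]` and `key[40:]` partition it exactly; an ObjectToExpirationEpoch entry keeps the 32-byte object ID as the key and the epoch as an 8-byte little-endian value. A big-endian epoch prefix keeps that bucket sorted by epoch, so range scans over expiring objects stay cheap (an inference from the encoding, not stated in the diff). A toy sketch of the 72-byte key construction, with placeholder 32-byte arrays instead of the SDK ID types:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func expirationKey(epoch uint64, cnt, obj [32]byte) []byte {
        key := make([]byte, 72) // 8 + 32 + 32
        binary.BigEndian.PutUint64(key[:8], epoch)
        copy(key[8:40], cnt[:])
        copy(key[40:], obj[:])
        return key
    }

    func main() {
        k := expirationKey(42, [32]byte{1}, [32]byte{2})
        fmt.Println(len(k)) // 72
    }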
@@ -2,6 +2,7 @@ package records

 import (
 	"fmt"
+	"strconv"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	"github.com/gdamore/tcell/v2"
@@ -133,3 +134,22 @@ func (r *ECInfoRecord) String() string {
 		len(r.ids),
 	)
 }
+
+func (r *ExpirationEpochToObjectRecord) String() string {
+	return fmt.Sprintf(
+		"exp. epoch %s %c CID %s OID %s",
+		common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
+		tview.Borders.Vertical,
+		common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
+		common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
+	)
+}
+
+func (r *ObjectToExpirationEpochRecord) String() string {
+	return fmt.Sprintf(
+		"OID %s %c exp. epoch %s",
+		common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
+		tview.Borders.Vertical,
+		common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
+	)
+}
@@ -79,4 +79,15 @@ type (
 		id  oid.ID
 		ids []oid.ID
 	}
+
+	ExpirationEpochToObjectRecord struct {
+		epoch uint64
+		cnt   cid.ID
+		obj   oid.ID
+	}
+
+	ObjectToExpirationEpochRecord struct {
+		obj   oid.ID
+		epoch uint64
+	}
 )
@@ -1,6 +1,8 @@
 package tui

 import (
+	"slices"
+
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
@@ -26,7 +28,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {

 	// Used history data for search prompt, so just make that data recent.
 	if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
-		f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
+		f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
 		f.history = append(f.history, s)
 	}

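Editorial note: `slices.Delete(s, i, j)` (standard library since Go 1.21) removes `s[i:j]` in place and returns the shortened slice, so the call above is the clearer equivalent of the classic `append(s[:i], s[i+1:]...)` one-element deletion. A standalone illustration:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        history := []string{"a", "b", "c"}
        history = slices.Delete(history, 1, 2) // drop "b"
        history = append(history, "b")         // ...and re-append it as most recent
        fmt.Println(history)                   // [a c b]
    }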
@@ -33,6 +33,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
 	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@@ -69,6 +70,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
 	netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -115,7 +117,6 @@ type applicationConfiguration struct {

 	EngineCfg struct {
 		errorThreshold uint32
-		shardPoolSize  uint32
 		shards         []shardCfg
 		lowMem         bool
 	}
@@ -134,6 +135,7 @@ type shardCfg struct {
 	refillMetabase             bool
 	refillMetabaseWorkersCount int
 	mode                       shardmode.Mode
+	limiter                    qos.Limiter

 	metaCfg struct {
 		path string
@@ -247,45 +249,47 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	// Storage Engine

 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
-	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
 	a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)

 	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
 }

-func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
-	var newConfig shardCfg
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
+	var target shardCfg

-	newConfig.refillMetabase = oldConfig.RefillMetabase()
-	newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
-	newConfig.mode = oldConfig.Mode()
-	newConfig.compress = oldConfig.Compress()
-	newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
-	newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
-	newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
-	newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
+	target.refillMetabase = source.RefillMetabase()
+	target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
+	target.mode = source.Mode()
+	target.compress = source.Compress()
+	target.estimateCompressibility = source.EstimateCompressibility()
+	target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold()
+	target.uncompressableContentType = source.UncompressableContentTypes()
+	target.smallSizeObjectLimit = source.SmallSizeLimit()

-	a.setShardWriteCacheConfig(&newConfig, oldConfig)
+	a.setShardWriteCacheConfig(&target, source)

-	a.setShardPiloramaConfig(c, &newConfig, oldConfig)
+	a.setShardPiloramaConfig(c, &target, source)

-	if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
+	if err := a.setShardStorageConfig(&target, source); err != nil {
 		return err
 	}

-	a.setMetabaseConfig(&newConfig, oldConfig)
+	a.setMetabaseConfig(&target, source)

-	a.setGCConfig(&newConfig, oldConfig)
+	a.setGCConfig(&target, source)
+	if err := a.setLimiter(&target, source); err != nil {
+		return err
+	}

-	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
+	a.EngineCfg.shards = append(a.EngineCfg.shards, target)

 	return nil
 }

-func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
-	writeCacheCfg := oldConfig.WriteCache()
+func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
+	writeCacheCfg := source.WriteCache()
 	if writeCacheCfg.Enabled() {
-		wc := &newConfig.writecacheCfg
+		wc := &target.writecacheCfg

 		wc.enabled = true
 		wc.path = writeCacheCfg.Path()
@@ -298,10 +302,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
 	}
 }

-func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
 	if config.BoolSafe(c.Sub("tree"), "enabled") {
-		piloramaCfg := oldConfig.Pilorama()
-		pr := &newConfig.piloramaCfg
+		piloramaCfg := source.Pilorama()
+		pr := &target.piloramaCfg

 		pr.enabled = true
 		pr.path = piloramaCfg.Path()
@@ -312,8 +316,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC
 	}
 }

-func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
-	blobStorCfg := oldConfig.BlobStor()
+func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
+	blobStorCfg := source.BlobStor()
 	storagesCfg := blobStorCfg.Storages()

 	ss := make([]subStorageCfg, 0, len(storagesCfg))
@@ -347,13 +351,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
 		ss = append(ss, sCfg)
 	}

-	newConfig.subStorages = ss
+	target.subStorages = ss
 	return nil
 }

-func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
-	metabaseCfg := oldConfig.Metabase()
-	m := &newConfig.metaCfg
+func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
+	metabaseCfg := source.Metabase()
+	m := &target.metaCfg

 	m.path = metabaseCfg.Path()
 	m.perm = metabaseCfg.BoltDB().Perm()
@@ -361,12 +365,25 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon
 	m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
 }

-func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
-	gcCfg := oldConfig.GC()
-	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
-	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
-	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
+func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
+	gcCfg := source.GC()
+	target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+	target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+	target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+	target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
 }

+func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
+	limitsConfig := source.Limits()
+	limiter, err := qos.NewLimiter(limitsConfig)
+	if err != nil {
+		return err
+	}
+	if target.limiter != nil {
+		target.limiter.Close()
+	}
+	target.limiter = limiter
+	return nil
+}
+
 // internals contains application-specific internals that are created
@@ -456,7 +473,6 @@ type shared struct {
 // dynamicConfiguration stores parameters of the
 // components that supports runtime reconfigurations.
 type dynamicConfiguration struct {
-	logger  *logger.Prm
 	pprof   *httpComponent
 	metrics *httpComponent
 }
@@ -528,6 +544,8 @@ type cfgGRPC struct {
 	maxChunkSize     uint64
 	maxAddrAmount    uint64
 	reconnectTimeout time.Duration
+
+	limiter atomic.Pointer[limiting.SemaphoreLimiter]
 }

 func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@@ -664,10 +682,6 @@ type cfgAccessPolicyEngine struct {
 }

 type cfgObjectRoutines struct {
-	putRemote *ants.Pool
-
-	putLocal *ants.Pool
-
 	replication *ants.Pool
 }

@@ -699,7 +713,8 @@ func initCfg(appCfg *config.Config) *cfg {

 	netState.metrics = c.metricsCollector

-	logPrm := c.loggerPrm()
+	logPrm, err := c.loggerPrm()
+	fatalOnErr(err)
 	logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
 	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
@@ -852,14 +867,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
 	}
 }

-func initCfgGRPC() cfgGRPC {
+func initCfgGRPC() (cfg cfgGRPC) {
 	maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
 	maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes

-	return cfgGRPC{
-		maxChunkSize:  maxChunkSize,
-		maxAddrAmount: maxAddrAmount,
-	}
+	cfg.maxChunkSize = maxChunkSize
+	cfg.maxAddrAmount = maxAddrAmount
+
+	return
 }

 func initCfgObject(appCfg *config.Config) cfgObject {
@@ -876,7 +891,6 @@ func (c *cfg) engineOpts() []engine.Option {
 	var opts []engine.Option

 	opts = append(opts,
-		engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
 		engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
 		engine.WithLogger(c.log),
 		engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
@@ -916,6 +930,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
 			writecache.WithMaxCacheCount(wcRead.countLimit),
 			writecache.WithNoSync(wcRead.noSync),
 			writecache.WithLogger(c.log),
+			writecache.WithQoSLimiter(shCfg.limiter),
 		)
 	}
 	return writeCacheOpts
@@ -1031,6 +1046,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 	}
 	if c.metricsCollector != nil {
 		mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
+		shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
 	}

 	var sh shardOptsWithID
@@ -1055,30 +1071,27 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID

 			return pool
 		}),
+		shard.WithLimiter(shCfg.limiter),
 	}
 	return sh
 }

-func (c *cfg) loggerPrm() *logger.Prm {
-	// check if it has been inited before
-	if c.dynamicConfiguration.logger == nil {
-		c.dynamicConfiguration.logger = new(logger.Prm)
-	}
-
+func (c *cfg) loggerPrm() (logger.Prm, error) {
+	var prm logger.Prm
 	// (re)init read configuration
-	err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+	err := prm.SetLevelString(c.LoggerCfg.level)
 	if err != nil {
-		// not expected since validation should be performed before
-		panic("incorrect log level format: " + c.LoggerCfg.level)
+		return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
 	}
-	err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+	err = prm.SetDestination(c.LoggerCfg.destination)
 	if err != nil {
-		// not expected since validation should be performed before
-		panic("incorrect log destination format: " + c.LoggerCfg.destination)
+		return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
 	}
-	c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
+	prm.PrependTimestamp = c.LoggerCfg.timestamp

-	return c.dynamicConfiguration.logger
+	return prm, nil
 }

 func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1166,21 +1179,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
 func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
 	var err error

-	optNonBlocking := ants.WithNonblocking(true)
-
-	putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
-	pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
-	fatalOnErr(err)
-
-	putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
-	pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
-	fatalOnErr(err)
-
 	replicatorPoolSize := replicatorconfig.PoolSize(cfg)
-	if replicatorPoolSize <= 0 {
-		replicatorPoolSize = putRemoteCapacity
-	}
-
 	pool.replication, err = ants.NewPool(replicatorPoolSize)
 	fatalOnErr(err)

@@ -1332,11 +1331,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	// all the components are expected to support
 	// Logger's dynamic reconfiguration approach

-	// Logger
-
-	logPrm := c.loggerPrm()
-
-	components := c.getComponents(ctx, logPrm)
+	components := c.getComponents(ctx)

 	// Object
 	c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@@ -1374,10 +1369,17 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }

-func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
+func (c *cfg) getComponents(ctx context.Context) []dCmp {
 	var components []dCmp

-	components = append(components, dCmp{"logger", logPrm.Reload})
+	components = append(components, dCmp{"logger", func() error {
+		prm, err := c.loggerPrm()
+		if err != nil {
+			return err
+		}
+		c.log.Reload(prm)
+		return nil
+	}})
 	components = append(components, dCmp{"runtime", func() error {
 		setRuntimeParameters(ctx, c)
 		return nil
@@ -1410,17 +1412,13 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
 	}

+	components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
+
 	return components
 }

 func (c *cfg) reloadPools() error {
-	newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
-	c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
-
-	newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
-	c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
-
-	newSize = replicatorconfig.PoolSize(c.appCfg)
+	newSize := replicatorconfig.PoolSize(c.appCfg)
 	c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")

 	return nil
@@ -12,13 +12,10 @@ import (
 func TestConfigDir(t *testing.T) {
 	dir := t.TempDir()

-	cfgFileName0 := path.Join(dir, "cfg_00.json")
-	cfgFileName1 := path.Join(dir, "cfg_01.yml")
+	cfgFileName := path.Join(dir, "cfg_01.yml")

-	require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
-	require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
+	require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))

 	c := New("", dir, "")
 	require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
-	require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
 }
@@ -11,10 +11,6 @@ import (

 const (
 	subsection = "storage"
-
-	// ShardPoolSizeDefault is a default value of routine pool size per-shard to
-	// process object PUT operations in a storage engine.
-	ShardPoolSizeDefault = 20
 )

 // ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -65,18 +61,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
 	return nil
 }

-// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
-//
-// Returns ShardPoolSizeDefault if the value is not a positive number.
-func ShardPoolSize(c *config.Config) uint32 {
-	v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
-	if v > 0 {
-		return v
-	}
-
-	return ShardPoolSizeDefault
-}
-
 // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
 //
 // Returns 0 if the the value is missing.
@@ -11,6 +11,7 @@ import (
 	blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
 	fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
 	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@@ -53,7 +54,6 @@ func TestEngineSection(t *testing.T) {
 		require.False(t, handlerCalled)

 		require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
-		require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
 		require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
 	})

@@ -63,7 +63,6 @@ func TestEngineSection(t *testing.T) {
 		num := 0

 		require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
-		require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))

 		err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
 			defer func() {
@@ -76,6 +75,7 @@ func TestEngineSection(t *testing.T) {
 			ss := blob.Storages()
 			pl := sc.Pilorama()
 			gc := sc.GC()
+			limits := sc.Limits()

 			switch num {
 			case 0:
@@ -134,6 +134,75 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, false, sc.RefillMetabase())
 				require.Equal(t, mode.ReadOnly, sc.Mode())
 				require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
+
+				readLimits := limits.Read()
+				writeLimits := limits.Write()
+				require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
+				require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
+				require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
+				require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
+				require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
+				require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
+				require.ElementsMatch(t, readLimits.Tags,
+					[]limitsconfig.IOTagConfig{
+						{
+							Tag:         "internal",
+							Weight:      toPtr(20),
+							ReservedOps: toPtr(1000),
+							LimitOps:    toPtr(0),
+						},
+						{
+							Tag:         "client",
+							Weight:      toPtr(70),
+							ReservedOps: toPtr(10000),
+						},
+						{
+							Tag:         "background",
+							Weight:      toPtr(5),
+							LimitOps:    toPtr(10000),
+							ReservedOps: toPtr(0),
+						},
+						{
+							Tag:      "writecache",
+							Weight:   toPtr(5),
+							LimitOps: toPtr(25000),
+						},
+						{
+							Tag:      "policer",
+							Weight:   toPtr(5),
+							LimitOps: toPtr(25000),
+						},
+					})
+				require.ElementsMatch(t, writeLimits.Tags,
+					[]limitsconfig.IOTagConfig{
+						{
+							Tag:         "internal",
+							Weight:      toPtr(200),
+							ReservedOps: toPtr(100),
+							LimitOps:    toPtr(0),
+						},
+						{
+							Tag:         "client",
+							Weight:      toPtr(700),
+							ReservedOps: toPtr(1000),
+						},
+						{
+							Tag:         "background",
+							Weight:      toPtr(50),
+							LimitOps:    toPtr(1000),
+							ReservedOps: toPtr(0),
+						},
+						{
+							Tag:      "writecache",
+							Weight:   toPtr(50),
+							LimitOps: toPtr(2500),
+						},
+						{
+							Tag:      "policer",
+							Weight:   toPtr(50),
+							LimitOps: toPtr(2500),
+						},
+					})
 			case 1:
 				require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
 				require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -188,6 +257,17 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, true, sc.RefillMetabase())
 				require.Equal(t, mode.ReadWrite, sc.Mode())
 				require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
+
+				readLimits := limits.Read()
+				writeLimits := limits.Write()
+				require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
+				require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
+				require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
+				require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
+				require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
+				require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
+				require.Equal(t, 0, len(readLimits.Tags))
+				require.Equal(t, 0, len(writeLimits.Tags))
 			}
 			return nil
 		})
@@ -201,3 +281,7 @@ func TestEngineSection(t *testing.T) {
 		configtest.ForEnvFileType(t, path, fileConfigTest)
 	})
 }
+
+func toPtr(v float64) *float64 {
+	return &v
+}
@@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchDelay() time.Duration {
 	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	if d < 0 {
-		d = 0
-	}
-	return d
+	return max(d, 0)
 }

 // MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchSize() int {
 	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	if s < 0 {
-		s = 0
-	}
-	return s
+	return max(s, 0)
 }

 // NoSync returns the value of "no_sync" config parameter.
@@ -66,8 +60,5 @@ func (x *Config) NoSync() bool {
 // Returns 0 if the value is not a positive number.
 func (x *Config) PageSize() int {
 	s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
-	if s < 0 {
-		s = 0
-	}
-	return s
+	return max(s, 0)
 }
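Editorial note: these rewrites rely on the generic `min`/`max` builtins introduced in Go 1.21; `max(v, 0)` clamps a negative value to zero in one expression, behaviour-identical to the removed if-blocks. For example:

    package main

    import "fmt"

    func main() {
        fmt.Println(max(-5, 0)) // 0
        fmt.Println(max(7, 0))  // 7
    }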
@@ -4,6 +4,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
+	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@@ -125,6 +126,14 @@ func (x *Config) GC() *gcconfig.Config {
 	)
 }

+// Limits returns "limits" subsection as a limitsconfig.Config.
+func (x *Config) Limits() *limitsconfig.Config {
+	return limitsconfig.From(
+		(*config.Config)(x).
+			Sub("limits"),
+	)
+}
+
 // RefillMetabase returns the value of "resync_metabase" config parameter.
 //
 // Returns false if the value is not a valid bool.
130
cmd/frostfs-node/config/engine/shard/limits/config.go
Normal file

@@ -0,0 +1,130 @@
+package limits
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	"github.com/spf13/cast"
+)
+
+const (
+	NoLimit            int64 = math.MaxInt64
+	DefaultIdleTimeout       = 5 * time.Minute
+)
+
+// From wraps config section into Config.
+func From(c *config.Config) *Config {
+	return (*Config)(c)
+}
+
+// Config is a wrapper over the config section
+// which provides access to Shard's limits configurations.
+type Config config.Config
+
+// Read returns the value of "read" limits config section.
+func (x *Config) Read() OpConfig {
+	return x.parse("read")
+}
+
+// Write returns the value of "write" limits config section.
+func (x *Config) Write() OpConfig {
+	return x.parse("write")
+}
+
+func (x *Config) parse(sub string) OpConfig {
+	c := (*config.Config)(x).Sub(sub)
+	var result OpConfig
+
+	if s := config.Int(c, "max_waiting_ops"); s > 0 {
+		result.MaxWaitingOps = s
+	} else {
+		result.MaxWaitingOps = NoLimit
+	}
+
+	if s := config.Int(c, "max_running_ops"); s > 0 {
+		result.MaxRunningOps = s
+	} else {
+		result.MaxRunningOps = NoLimit
+	}
+
+	if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
+		result.IdleTimeout = s
+	} else {
+		result.IdleTimeout = DefaultIdleTimeout
+	}
+
+	result.Tags = tags(c)
+
+	return result
+}
+
+type OpConfig struct {
+	// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
+	//
+	// Equals NoLimit if the value is not a positive number.
+	MaxWaitingOps int64
+	// MaxRunningOps returns the value of "max_running_ops" config parameter.
+	//
+	// Equals NoLimit if the value is not a positive number.
+	MaxRunningOps int64
+	// IdleTimeout returns the value of "idle_timeout" config parameter.
+	//
+	// Equals DefaultIdleTimeout if the value is not a valid duration.
+	IdleTimeout time.Duration
+	// Tags returns the value of "tags" config parameter.
+	//
+	// Equals nil if the value is not a valid tags config slice.
+	Tags []IOTagConfig
+}
+
+type IOTagConfig struct {
+	Tag         string
+	Weight      *float64
+	LimitOps    *float64
+	ReservedOps *float64
+}
+
+func tags(c *config.Config) []IOTagConfig {
+	c = c.Sub("tags")
+	var result []IOTagConfig
+	for i := 0; ; i++ {
+		tag := config.String(c, strconv.Itoa(i)+".tag")
+		if tag == "" {
+			return result
+		}
+
+		var tagConfig IOTagConfig
+		tagConfig.Tag = tag
+
+		v := c.Value(strconv.Itoa(i) + ".weight")
+		if v != nil {
+			w, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.Weight = &w
+		}
+
+		v = c.Value(strconv.Itoa(i) + ".limit_ops")
+		if v != nil {
+			l, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.LimitOps = &l
+		}
+
+		v = c.Value(strconv.Itoa(i) + ".reserved_ops")
+		if v != nil {
+			r, err := cast.ToFloat64E(v)
+			panicOnErr(err)
+			tagConfig.ReservedOps = &r
+		}
+
+		result = append(result, tagConfig)
+	}
+}
+
+func panicOnErr(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
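Editorial note: for orientation, a YAML shape that parse() and tags() above would accept, with values mirroring the engine-section test earlier in this diff. This is a reconstruction from the parser logic, not a copy of the repo's example config; it shows only the per-shard "limits" subsection that Config.Limits() wraps:

    limits:
      read:
        max_running_ops: 10000
        max_waiting_ops: 1000
        idle_timeout: 30s
        tags:
          - tag: internal
            weight: 20
            reserved_ops: 1000
            limit_ops: 0
          - tag: client
            weight: 70
            reserved_ops: 10000
      write:
        max_running_ops: 1000
        max_waiting_ops: 100
        idle_timeout: 45s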
@@ -52,10 +52,7 @@ func (x *Config) NoSync() bool {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchDelay() time.Duration {
 	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	if d <= 0 {
-		d = 0
-	}
-	return d
+	return max(d, 0)
 }

 // MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchSize() int {
 	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	if s <= 0 {
-		s = 0
-	}
-	return s
+	return max(s, 0)
 }
@@ -21,10 +21,6 @@ const (

 	putSubsection = "put"
 	getSubsection = "get"
-
-	// PutPoolSizeDefault is a default value of routine pool size to
-	// process object.Put requests in object service.
-	PutPoolSizeDefault = 10
 )

 // Put returns structure that provides access to "put" subsection of
@@ -35,30 +31,6 @@ func Put(c *config.Config) PutConfig {
 	}
 }

-// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
-//
-// Returns PutPoolSizeDefault if the value is not a positive number.
-func (g PutConfig) PoolSizeRemote() int {
-	v := config.Int(g.cfg, "remote_pool_size")
-	if v > 0 {
-		return int(v)
-	}
-
-	return PutPoolSizeDefault
-}
-
-// PoolSizeLocal returns the value of "local_pool_size" config parameter.
-//
-// Returns PutPoolSizeDefault if the value is not a positive number.
-func (g PutConfig) PoolSizeLocal() int {
-	v := config.Int(g.cfg, "local_pool_size")
-	if v > 0 {
-		return int(v)
-	}
-
-	return PutPoolSizeDefault
-}
-
 // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if is not defined.
 func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
 	return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
@@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		empty := configtest.EmptyConfig()

-		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
-		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
 		require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
 		require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
 	})
@@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) {
 	const path = "../../../../config/example/node"

 	fileConfigTest := func(c *config.Config) {
-		require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
-		require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
 		require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
 		require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
 	}
@@ -11,6 +11,8 @@ const (

 	// PutTimeoutDefault is a default timeout of object put request in replicator.
 	PutTimeoutDefault = 5 * time.Second
+	// PoolSizeDefault is a default pool size for put request in replicator.
+	PoolSizeDefault = 10
 )

 // PutTimeout returns the value of "put_timeout" config parameter
@@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration {

 // PoolSize returns the value of "pool_size" config parameter
 // from "replicator" section.
+//
+// Returns PoolSizeDefault if the value is non-positive integer.
 func PoolSize(c *config.Config) int {
-	return int(config.IntSafe(c.Sub(subsection), "pool_size"))
+	v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
+	if v > 0 {
+		return v
+	}
+
+	return PoolSizeDefault
 }
@@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
 		empty := configtest.EmptyConfig()

 		require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
-		require.Equal(t, 0, replicatorconfig.PoolSize(empty))
+		require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
 	})

 	const path = "../../../../config/example/node"
42
cmd/frostfs-node/config/rpc/config.go
Normal file

@@ -0,0 +1,42 @@
+package rpcconfig
+
+import (
+	"strconv"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+)
+
+const (
+	subsection       = "rpc"
+	limitsSubsection = "limits"
+)
+
+type LimitConfig struct {
+	Methods []string
+	MaxOps  int64
+}
+
+// Limits returns the "limits" config from "rpc" section.
+func Limits(c *config.Config) []LimitConfig {
+	c = c.Sub(subsection).Sub(limitsSubsection)
+
+	var limits []LimitConfig
+
+	for i := uint64(0); ; i++ {
+		si := strconv.FormatUint(i, 10)
+		sc := c.Sub(si)
+
+		methods := config.StringSliceSafe(sc, "methods")
+		if len(methods) == 0 {
+			break
+		}
+
+		if sc.Value("max_ops") == nil {
+			panic("no max operations for method group")
+		}
+
+		limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
+	}
+
+	return limits
+}
77
cmd/frostfs-node/config/rpc/config_test.go
Normal file

@@ -0,0 +1,77 @@
+package rpcconfig
+
+import (
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRPCSection(t *testing.T) {
+	t.Run("defaults", func(t *testing.T) {
+		require.Empty(t, Limits(configtest.EmptyConfig()))
+	})
+
+	t.Run("correct config", func(t *testing.T) {
+		const path = "../../../../config/example/node"
+
+		fileConfigTest := func(c *config.Config) {
+			limits := Limits(c)
+			require.Len(t, limits, 2)
+
+			limit0 := limits[0]
+			limit1 := limits[1]
+
+			require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
+			require.Equal(t, limit0.MaxOps, int64(1000))
+
+			require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
+			require.Equal(t, limit1.MaxOps, int64(10000))
+		}
+
+		configtest.ForEachFileType(path, fileConfigTest)
+
+		t.Run("ENV", func(t *testing.T) {
+			configtest.ForEnvFileType(t, path, fileConfigTest)
+		})
+	})
+
+	t.Run("no max operations", func(t *testing.T) {
+		const path = "testdata/no_max_ops"
+
+		fileConfigTest := func(c *config.Config) {
+			require.Panics(t, func() { _ = Limits(c) })
+		}
+
+		configtest.ForEachFileType(path, fileConfigTest)
+
+		t.Run("ENV", func(t *testing.T) {
+			configtest.ForEnvFileType(t, path, fileConfigTest)
+		})
+	})
+
+	t.Run("zero max operations", func(t *testing.T) {
+		const path = "testdata/zero_max_ops"
+
+		fileConfigTest := func(c *config.Config) {
+			limits := Limits(c)
+			require.Len(t, limits, 2)
+
+			limit0 := limits[0]
+			limit1 := limits[1]
+
+			require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
+			require.Equal(t, limit0.MaxOps, int64(0))
+
+			require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
+			require.Equal(t, limit1.MaxOps, int64(10000))
+		}
+
+		configtest.ForEachFileType(path, fileConfigTest)
+
+		t.Run("ENV", func(t *testing.T) {
+			configtest.ForEnvFileType(t, path, fileConfigTest)
+		})
+	})
+}
3
cmd/frostfs-node/config/rpc/testdata/no_max_ops.env
vendored
Normal file

@@ -0,0 +1,3 @@
+FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
+FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
+FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

18
cmd/frostfs-node/config/rpc/testdata/no_max_ops.json
vendored
Normal file

@@ -0,0 +1,18 @@
+{
+  "rpc": {
+    "limits": [
+      {
+        "methods": [
+          "/neo.fs.v2.object.ObjectService/PutSingle",
+          "/neo.fs.v2.object.ObjectService/Put"
+        ]
+      },
+      {
+        "methods": [
+          "/neo.fs.v2.object.ObjectService/Get"
+        ],
+        "max_ops": 10000
+      }
+    ]
+  }
+}

8
cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml
vendored
Normal file

@@ -0,0 +1,8 @@
+rpc:
+  limits:
+    - methods:
+        - /neo.fs.v2.object.ObjectService/PutSingle
+        - /neo.fs.v2.object.ObjectService/Put
+    - methods:
+        - /neo.fs.v2.object.ObjectService/Get
+      max_ops: 10000

4
cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env
vendored
Normal file

@@ -0,0 +1,4 @@
+FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
+FROSTFS_RPC_LIMITS_0_MAX_OPS=0
+FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
+FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

19
cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json
vendored
Normal file

@@ -0,0 +1,19 @@
+{
+  "rpc": {
+    "limits": [
+      {
+        "methods": [
+          "/neo.fs.v2.object.ObjectService/PutSingle",
+          "/neo.fs.v2.object.ObjectService/Put"
+        ],
+        "max_ops": 0
+      },
+      {
+        "methods": [
+          "/neo.fs.v2.object.ObjectService/Get"
+        ],
+        "max_ops": 10000
+      }
+    ]
+  }
+}

9
cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml
vendored
Normal file

@@ -0,0 +1,9 @@
+rpc:
+  limits:
+    - methods:
+        - /neo.fs.v2.object.ObjectService/PutSingle
+        - /neo.fs.v2.object.ObjectService/Put
+      max_ops: 0
+    - methods:
+        - /neo.fs.v2.object.ObjectService/Get
+      max_ops: 10000
@@ -4,14 +4,18 @@ import (
 	"context"
 	"crypto/tls"
 	"errors"
+	"fmt"
 	"net"
 	"time"

 	grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
+	rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
 	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
 	qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
@@ -134,11 +138,13 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
 			qos.NewUnaryServerInterceptor(),
 			metrics.NewUnaryServerInterceptor(),
 			tracing.NewUnaryServerInterceptor(),
+			qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
 		),
 		grpc.ChainStreamInterceptor(
 			qos.NewStreamServerInterceptor(),
 			metrics.NewStreamServerInterceptor(),
 			tracing.NewStreamServerInterceptor(),
+			qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
 		),
 	}

@@ -227,3 +233,54 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger

 	l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
+
+func initRPCLimiter(c *cfg) error {
+	var limits []limiting.KeyLimit
+	for _, l := range rpcconfig.Limits(c.appCfg) {
+		limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
+	}
+
+	if err := validateRPCLimits(c, limits); err != nil {
+		return fmt.Errorf("validate RPC limits: %w", err)
+	}
+
+	limiter, err := limiting.NewSemaphoreLimiter(limits)
+	if err != nil {
+		return fmt.Errorf("create RPC limiter: %w", err)
+	}
+
+	c.cfgGRPC.limiter.Store(limiter)
+	return nil
+}
+
+func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
+	availableMethods := getAvailableMethods(c.cfgGRPC.servers)
+	for _, limit := range limits {
+		for _, method := range limit.Keys {
+			if _, ok := availableMethods[method]; !ok {
+				return fmt.Errorf("set limit on an unknown method %q", method)
+			}
+		}
+	}
+	return nil
+}
+
+func getAvailableMethods(servers []grpcServer) map[string]struct{} {
+	res := make(map[string]struct{})
+	for _, server := range servers {
+		for _, method := range getMethodsForServer(server.Server) {
+			res[method] = struct{}{}
+		}
+	}
+	return res
+}
+
+func getMethodsForServer(server *grpc.Server) []string {
+	var res []string
+	for service, info := range server.GetServiceInfo() {
+		for _, method := range info.Methods {
+			res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
+		}
+	}
+	return res
+}
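Editorial note: the limiter is swapped, never mutated, on reload: initRPCLimiter builds a complete SemaphoreLimiter and publishes it through the atomic.Pointer added to cfgGRPC earlier in this diff, while the interceptors Load() it per call, so in-flight requests keep whatever limiter they started with. The publication pattern in isolation (the limiter type here is a placeholder, not the frostfs-qos one):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type limiter struct{ max int64 }

    func main() {
        var current atomic.Pointer[limiter]
        current.Store(&limiter{max: 1000}) // initial config

        // a "reload": build a new value, then publish it atomically
        current.Store(&limiter{max: 2000})

        fmt.Println(current.Load().max) // readers always see a fully built limiter
    }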
@@ -117,6 +117,8 @@ func initApp(ctx context.Context, c *cfg) {
 	initAndLog(ctx, c, "apemanager", initAPEManagerService)
 	initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })

+	initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
+
 	initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
 }

@ -16,7 +16,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
|
||||
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
|
||||
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
|
||||
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
|
||||
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
|
||||
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
|
||||
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
|
||||
|
@ -172,12 +171,10 @@ func initObjectService(c *cfg) {
|
|||
|
||||
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
|
||||
|
||||
apeSvc := createAPEService(c, splitSvc)
|
||||
|
||||
aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
|
||||
apeSvc := createAPEService(c, &irFetcher, splitSvc)
|
||||
|
||||
var commonSvc objectService.Common
|
||||
commonSvc.Init(&c.internals, aclSvc)
|
||||
commonSvc.Init(&c.internals, apeSvc)
|
||||
|
||||
respSvc := objectService.NewResponseService(
|
||||
&commonSvc,
|
||||
|
@@ -284,7 +281,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
	})
}

-func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
+func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
	return &innerRingFetcherWithNotary{
		sidechain: c.cfgMorph.client,
	}

@@ -326,7 +323,6 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
		c,
		c.cfgNetmap.state,
-		irFetcher,
		objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
		objectwriter.WithLogger(c.log),
		objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
	)
@@ -430,17 +426,7 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
	)
}

-func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
-	return v2.New(
-		apeSvc,
-		c.netMapSource,
-		irFetcher,
-		c.cfgObject.cnrSource,
-		v2.WithLogger(c.log),
-	)
-}
-
-func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
+func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
	return objectAPE.NewService(
		objectAPE.NewChecker(
			c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),

@@ -452,6 +438,7 @@ func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *object
			c.cfgObject.cnrSource,
			c.binPublicKey,
		),
+		objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
		splitSvc,
	)
}
@@ -47,7 +47,7 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
	}
	ioTag, err := qos.FromRawString(rawTag)
	if err != nil {
-		s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
+		s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	}

@@ -70,6 +70,7 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
				return ctx
			}
		}
+		s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	case qos.IOTagInternal:
		for _, pk := range s.allowedInternalPubs {

@@ -87,9 +88,10 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
				return ctx
			}
		}
+		s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	default:
-		s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
+		s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	}
}
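Every failure path above converges on the same fallback: the request is downgraded to the `client` tag rather than rejected. A condensed sketch of that policy (a standalone illustration, not the node's actual code; `validPeer` is a hypothetical stand-in for the pubkey checks):

```go
package qosexample

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)

// adjustTag condenses AdjustIncomingTag's policy: any parse failure or
// failed peer check silently downgrades the request to the "client" tag.
func adjustTag(ctx context.Context, raw string, validPeer bool) context.Context {
	tag, err := qos.FromRawString(raw)
	if err != nil || !validPeer {
		return tagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	}
	return tagging.ContextWithIOTag(ctx, tag.String())
}
```

Lowering these log calls from `Warn` to `Debug` matches that design: a downgraded tag is expected behavior, not an operator-actionable event.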

@@ -1,7 +1,6 @@
package main

import (
-	"os"
	"path/filepath"
	"testing"

@@ -22,17 +21,4 @@ func TestValidate(t *testing.T) {
			require.NoError(t, err)
		})
	})
-
-	t.Run("mainnet", func(t *testing.T) {
-		os.Clearenv() // ENVs have priority over config files, so we do this in tests
-		p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
-		c := config.New(p, "", config.EnvPrefix)
-		require.NoError(t, validateConfig(c))
-	})
-	t.Run("testnet", func(t *testing.T) {
-		os.Clearenv() // ENVs have priority over config files, so we do this in tests
-		p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
-		c := config.New(p, "", config.EnvPrefix)
-		require.NoError(t, validateConfig(c))
-	})
}

@@ -87,14 +87,16 @@ FROSTFS_REPLICATOR_POOL_SIZE=10
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500

# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"

FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

# Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor

@@ -154,6 +156,47 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
#### Limit of concurrent workers collecting expired objects by the garbage collector
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
#### Limits config
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500

## 1 shard
### Flag to refill Metabase from BlobStor
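As the listing shows, each environment variable mirrors the YAML path of the option it overrides: `storage.shard.0.limits.read.tags.0.weight` becomes `FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT` (the `FROSTFS_` prefix plus the upper-cased path joined with underscores). The JSON and YAML blocks below configure the same limits in the other two supported formats.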

@@ -134,16 +134,30 @@
      "tombstone_lifetime": 10
    },
    "put": {
      "remote_pool_size": 100,
      "local_pool_size": 200,
      "skip_session_token_issuer_verification": true
    },
    "get": {
      "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
    }
  },
  "rpc": {
    "limits": [
      {
        "methods": [
          "/neo.fs.v2.object.ObjectService/PutSingle",
          "/neo.fs.v2.object.ObjectService/Put"
        ],
        "max_ops": 1000
      },
      {
        "methods": [
          "/neo.fs.v2.object.ObjectService/Get"
        ],
        "max_ops": 10000
      }
    ]
  },
  "storage": {
    "shard_pool_size": 15,
    "shard_ro_error_threshold": 100,
    "shard": {
      "0": {

@@ -206,6 +220,76 @@
          "remover_sleep_interval": "2m",
          "expired_collector_batch_size": 1500,
          "expired_collector_worker_count": 15
        },
        "limits": {
          "read": {
            "max_running_ops": 10000,
            "max_waiting_ops": 1000,
            "idle_timeout": "30s",
            "tags": [
              {
                "tag": "internal",
                "weight": 20,
                "limit_ops": 0,
                "reserved_ops": 1000
              },
              {
                "tag": "client",
                "weight": 70,
                "reserved_ops": 10000
              },
              {
                "tag": "background",
                "weight": 5,
                "limit_ops": 10000,
                "reserved_ops": 0
              },
              {
                "tag": "writecache",
                "weight": 5,
                "limit_ops": 25000
              },
              {
                "tag": "policer",
                "weight": 5,
                "limit_ops": 25000
              }
            ]
          },
          "write": {
            "max_running_ops": 1000,
            "max_waiting_ops": 100,
            "idle_timeout": "45s",
            "tags": [
              {
                "tag": "internal",
                "weight": 200,
                "limit_ops": 0,
                "reserved_ops": 100
              },
              {
                "tag": "client",
                "weight": 700,
                "reserved_ops": 1000
              },
              {
                "tag": "background",
                "weight": 50,
                "limit_ops": 1000,
                "reserved_ops": 0
              },
              {
                "tag": "writecache",
                "weight": 50,
                "limit_ops": 2500
              },
              {
                "tag": "policer",
                "weight": 50,
                "limit_ops": 2500
              }
            ]
          }
        }
      },
      "1": {

@@ -117,17 +117,24 @@ object:
  delete:
    tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
  put:
    remote_pool_size: 100 # number of async workers for remote PUT operations
    local_pool_size: 200 # number of async workers for local PUT operations
    skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
  get:
    priority: # list of metrics of nodes for prioritization
      - $attribute:ClusterName
      - $attribute:UN-LOCODE

rpc:
  limits:
    - methods:
        - /neo.fs.v2.object.ObjectService/PutSingle
        - /neo.fs.v2.object.ObjectService/Put
      max_ops: 1000
    - methods:
        - /neo.fs.v2.object.ObjectService/Get
      max_ops: 10000

storage:
  # note: shard configuration can be omitted for relay node (see `node.relay`)
  shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
  shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)

  shard:

@@ -219,6 +226,52 @@ storage:
        expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
        expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector

      limits:
        read:
          max_running_ops: 10000
          max_waiting_ops: 1000
          idle_timeout: 30s
          tags:
            - tag: internal
              weight: 20
              limit_ops: 0
              reserved_ops: 1000
            - tag: client
              weight: 70
              reserved_ops: 10000
            - tag: background
              weight: 5
              limit_ops: 10000
              reserved_ops: 0
            - tag: writecache
              weight: 5
              limit_ops: 25000
            - tag: policer
              weight: 5
              limit_ops: 25000
        write:
          max_running_ops: 1000
          max_waiting_ops: 100
          idle_timeout: 45s
          tags:
            - tag: internal
              weight: 200
              limit_ops: 0
              reserved_ops: 100
            - tag: client
              weight: 700
              reserved_ops: 1000
            - tag: background
              weight: 50
              limit_ops: 1000
              reserved_ops: 0
            - tag: writecache
              weight: 50
              limit_ops: 2500
            - tag: policer
              weight: 50
              limit_ops: 2500

    1:
      writecache:
        path: tmp/1/cache # write-cache root directory
@@ -1,28 +0,0 @@
# N3 Mainnet Storage node configuration

Here is a template for simple storage node configuration in N3 Mainnet.
Make sure to specify correct values instead of `<...>` placeholders.
Do not change the `contracts` section. Run the latest frostfs-node release with
the fixed config: `frostfs-node -c config.yml`.

To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).

## Tips

Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server.
```yaml
node:
  addresses:
    - grpcs://frostfs.my.org:8080

grpc:
  num: 1
  0:
    endpoint: frostfs.my.org:8080
    tls:
      enabled: true
      certificate: /path/to/cert
      key: /path/to/key
```
@@ -1,70 +0,0 @@
node:
  wallet:
    path: <path/to/wallet>
    address: <address-in-wallet>
    password: <password>
  addresses:
    - <announced.address:port>
  attribute_0: UN-LOCODE:<XX YYY>
  attribute_1: Price:100000
  attribute_2: User-Agent:FrostFS\/0.9999

grpc:
  num: 1
  0:
    endpoint: <listen.local.address:port>
    tls:
      enabled: false

storage:
  shard_num: 1
  shard:
    0:
      metabase:
        path: /storage/path/metabase
        perm: 0600
      blobstor:
        - path: /storage/path/blobovnicza
          type: blobovnicza
          perm: 0600
          opened_cache_capacity: 32
          depth: 1
          width: 1
        - path: /storage/path/fstree
          type: fstree
          perm: 0600
          depth: 4
      writecache:
        enabled: false
      gc:
        remover_batch_size: 100
        remover_sleep_interval: 1m

logger:
  level: info

prometheus:
  enabled: true
  address: localhost:9090
  shutdown_timeout: 15s

object:
  put:
    remote_pool_size: 100
    local_pool_size: 100

morph:
  rpc_endpoint:
    - wss://rpc1.morph.frostfs.info:40341/ws
    - wss://rpc2.morph.frostfs.info:40341/ws
    - wss://rpc3.morph.frostfs.info:40341/ws
    - wss://rpc4.morph.frostfs.info:40341/ws
    - wss://rpc5.morph.frostfs.info:40341/ws
    - wss://rpc6.morph.frostfs.info:40341/ws
    - wss://rpc7.morph.frostfs.info:40341/ws
  dial_timeout: 20s

contracts:
  balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
  container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
  netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
@@ -1,129 +0,0 @@
# N3 Testnet Storage node configuration

There is a prepared configuration for NeoFS Storage Node deployment in
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
docker image and run it with docker-compose.

## Build image

The prepared **frostfs-storage-testnet** image is available on Docker Hub.
However, if you need to rebuild it for some reason, run the
`make image-storage-testnet` command.

```
$ make image-storage-testnet
...
Successfully built ab0557117b02
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
```

## Deploy node

To run a storage node in the N3 Testnet environment, you should deposit GAS assets,
update the docker-compose file, and start the node.

### Deposit

The Storage Node owner should deposit GAS to the NeoFS smart contract. It generates a
bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send the bootstrap tx.

First, obtain GAS in the N3 Testnet chain. You can do that with the
[faucet](https://neowish.ngd.network) service.

Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
You can provide a script hash in the `data` argument of the transfer tx to make a
deposit to a specified account. Otherwise, the deposit is made to the tx sender.

The NeoFS contract script hash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.

See a deposit example with `neo-go`.

```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
--token GAS \
--amount 1
```

### Configure

Next, configure the `node_config.env` file. Change the endpoint values; both
should contain your **public** IP.

```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
```

Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
attribute.

```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```

You can validate the UN/LOCODE attribute against the
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with `frostfs-cli`.

```
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
Country: Russia
Location: Saint Petersburg (ex Leningrad)
Continent: Europe
Subdivision: [SPE] Sankt-Peterburg
Coordinates: 59.53, 30.15
```

It is recommended to pass the node's key as a file. To do so, convert your wallet
WIF to 32-byte hex (via `frostfs-cli`, for example) and save it to a file.

```
// Print WIF in a 32-byte hex format
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
PrivateKey      11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
PublicKey       02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
WIF             Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
Wallet3.0       Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
ScriptHash3.0   dffe39998f50d42f2e06807866161cd0440b4bdc
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf

// Save 32-byte hex into a file
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
```

Then, specify the path to this file in `docker-compose.yml`:
```yaml
     volumes:
      - frostfs_storage:/storage
      - ./my_wallet.key:/node.key
```

NeoFS objects will be stored on your machine. By default, docker-compose
is configured to store objects in the named docker volume `frostfs_storage`. You can
instead specify a directory on the filesystem to store objects.

```yaml
     volumes:
      - /home/username/frostfs/rc3/storage:/storage
      - ./my_wallet.key:/node.key
```

### Start

Run the node with the `docker-compose up` command and stop it with `docker-compose down`.

### Debug

To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
the log, set the log level to debug with this env:

```yaml
     environment:
      - NEOFS_LOGGER_LEVEL=debug
```
@@ -1,52 +0,0 @@
logger:
  level: info

morph:
  rpc_endpoint:
    - wss://rpc01.morph.testnet.frostfs.info:51331/ws
    - wss://rpc02.morph.testnet.frostfs.info:51331/ws
    - wss://rpc03.morph.testnet.frostfs.info:51331/ws
    - wss://rpc04.morph.testnet.frostfs.info:51331/ws
    - wss://rpc05.morph.testnet.frostfs.info:51331/ws
    - wss://rpc06.morph.testnet.frostfs.info:51331/ws
    - wss://rpc07.morph.testnet.frostfs.info:51331/ws
  dial_timeout: 20s

contracts:
  balance: e0420c216003747626670d1424569c17c79015bf
  container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
  netmap: d4b331639799e2958d4bc5b711b469d79de94e01

node:
  key: /node.key
  attribute_0: Deployed:SelfHosted
  attribute_1: User-Agent:FrostFS\/0.9999

prometheus:
  enabled: true
  address: localhost:9090
  shutdown_timeout: 15s

storage:
  shard_num: 1
  shard:
    0:
      metabase:
        path: /storage/metabase
        perm: 0777
      blobstor:
        - path: /storage/path/blobovnicza
          type: blobovnicza
          perm: 0600
          opened_cache_capacity: 32
          depth: 1
          width: 1
        - path: /storage/path/fstree
          type: fstree
          perm: 0600
          depth: 4
      writecache:
        enabled: false
      gc:
        remover_batch_size: 100
        remover_sleep_interval: 1m
@@ -51,10 +51,7 @@ However, all mode changing operations are idempotent.

## Automatic mode changes

-Shard can automatically switch to a `degraded-read-only` mode in 3 cases:
-1. If the metabase was not available or couldn't be opened/initialized during shard startup.
-2. If shard error counter exceeds threshold.
-3. If the metabase couldn't be reopened during SIGHUP handling.
+A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.

# Detach shard
@@ -170,7 +170,6 @@ Local storage engine configuration.

| Parameter                  | Type                              | Default value | Description                                                                                                        |
|----------------------------|-----------------------------------|---------------|--------------------------------------------------------------------------------------------------------------------|
| `shard_pool_size`          | `int`                             | `20`          | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard.                       |
| `shard_ro_error_threshold` | `int`                             | `0`           | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode.   |
| `low_mem`                  | `bool`                            | `false`       | Reduce memory consumption by reducing performance.                                                                 |
| `shard`                    | [Shard config](#shard-subsection) |               | Configuration for separate shards.                                                                                 |

@@ -195,6 +194,7 @@ The following table describes configuration for each shard.
| `blobstor`          | [Blobstor config](#blobstor-subsection)   |      | Blobstor configuration.                                |
| `small_object_size` | `size`                                    | `1M` | Maximum size of an object stored in blobovnicza tree.  |
| `gc`                | [GC config](#gc-subsection)               |      | GC configuration.                                      |
+| `limits`            | [Shard limits config](#limits-subsection) |      | Shard limits configuration.                            |

### `blobstor` subsection
@@ -301,6 +301,64 @@ writecache:
| `flush_worker_count`         | `int`  | `20`   | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size`  | `size` | `512M` | Max total size of background flushing objects.                                   |

### `limits` subsection

```yaml
limits:
  max_read_running_ops: 10000
  max_read_waiting_ops: 1000
  max_write_running_ops: 1000
  max_write_waiting_ops: 100
  read:
    - tag: internal
      weight: 20
      limit_ops: 0
      reserved_ops: 1000
    - tag: client
      weight: 70
      reserved_ops: 10000
    - tag: background
      weight: 5
      limit_ops: 10000
      reserved_ops: 0
    - tag: writecache
      weight: 5
      limit_ops: 25000
    - tag: policer
      weight: 5
      limit_ops: 25000
  write:
    - tag: internal
      weight: 200
      limit_ops: 0
      reserved_ops: 100
    - tag: client
      weight: 700
      reserved_ops: 1000
    - tag: background
      weight: 50
      limit_ops: 1000
      reserved_ops: 0
    - tag: writecache
      weight: 50
      limit_ops: 2500
    - tag: policer
      weight: 50
      limit_ops: 2500
```

| Parameter                | Type     | Default value  | Description                                                                                                       |
| ------------------------ | -------- | -------------- | ----------------------------------------------------------------------------------------------------------------- |
| `max_read_running_ops`   | `int`    | 0 (no limit)   | The maximum number of running read operations.                                                                    |
| `max_read_waiting_ops`   | `int`    | 0 (no limit)   | The maximum number of waiting read operations.                                                                    |
| `max_write_running_ops`  | `int`    | 0 (no limit)   | The maximum number of running write operations.                                                                   |
| `max_write_waiting_ops`  | `int`    | 0 (no limit)   | The maximum number of waiting write operations.                                                                   |
| `read`                   | `[]tag`  | empty          | Array of shard read settings for tags.                                                                            |
| `write`                  | `[]tag`  | empty          | Array of shard write settings for tags.                                                                           |
| `tag.tag`                | `string` | empty          | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`.                            |
| `tag.weight`             | `float`  | 0 (no weight)  | Weight for queries with the specified tag. Weights must be specified for all tags or for none.                    |
| `tag.limit_ops`          | `float`  | 0 (no limit)   | Operations per second rate limit for queries with the specified tag.                                              |
| `tag.reserved_ops`       | `float`  | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag.                                           |
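To make the weight semantics concrete: with the read weights above (internal 20, client 70, and 5 each for background, writecache, and policer, totaling 105), a fully contended shard schedules roughly 70/105 ≈ 67% of read capacity for `client` traffic, while `reserved_ops` still guarantees `internal` reads a floor of 1000 ops/s regardless of contention.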

# `node` section

@@ -396,18 +454,16 @@ replicator:
  pool_size: 10
```

-| Parameter     | Type       | Default value                          | Description                                 |
-|---------------|------------|----------------------------------------|---------------------------------------------|
-| `put_timeout` | `duration` | `5s`                                   | Timeout for performing the `PUT` operation. |
-| `pool_size`   | `int`      | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications.  |
+| Parameter     | Type       | Default value | Description                                 |
+|---------------|------------|---------------|---------------------------------------------|
+| `put_timeout` | `duration` | `5s`          | Timeout for performing the `PUT` operation. |
+| `pool_size`   | `int`      | `10`          | Maximum amount of concurrent replications.  |

# `object` section
Contains object-service related parameters.

```yaml
object:
  put:
    remote_pool_size: 100
  get:
    priority:
      - $attribute:ClusterName

@@ -416,10 +472,29 @@ object:
| Parameter                   | Type       | Default value | Description                                                                                      |
|-----------------------------|------------|---------------|--------------------------------------------------------------------------------------------------|
| `delete.tombstone_lifetime` | `int`      | `5`           | Tombstone lifetime for removed objects in epochs.                                                |
| `put.remote_pool_size`      | `int`      | `10`          | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services.   |
| `put.local_pool_size`       | `int`      | `10`          | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services.    |
| `get.priority`              | `[]string` |               | List of metrics of nodes for prioritization. Used for computing response on GET requests.        |

# `rpc` section
Contains limits on the number of active RPCs for the specified method(s).

```yaml
rpc:
  limits:
    - methods:
        - /neo.fs.v2.object.ObjectService/PutSingle
        - /neo.fs.v2.object.ObjectService/Put
      max_ops: 1000
    - methods:
        - /neo.fs.v2.object.ObjectService/Get
      max_ops: 10000
```

| Parameter        | Type       | Default value | Description                                                    |
|------------------|------------|---------------|----------------------------------------------------------------|
| `limits.max_ops` | `int`      |               | Maximum number of active RPCs allowed for the given method(s). |
| `limits.methods` | `[]string` |               | List of RPC methods sharing the given limit.                   |

# `runtime` section
Contains runtime parameters.

12 go.mod

@@ -1,18 +1,18 @@
module git.frostfs.info/TrueCloudLab/frostfs-node

-go 1.22
+go 1.23

require (
	code.gitea.io/sdk/gitea v0.17.1
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
	git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
-	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4
+	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
+	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
	git.frostfs.info/TrueCloudLab/hrw v1.2.1
	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
-	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
+	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
	github.com/VictoriaMetrics/easyproto v0.1.4
20 go.sum

@@ -1,25 +1,25 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
9 internal/assert/cond.go Normal file

@@ -0,0 +1,9 @@
package assert

import "strings"

func True(cond bool, details ...string) {
	if !cond {
		panic(strings.Join(details, " "))
	}
}
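A hypothetical call site, to show the intended shape of this helper (the condition and message below are invented for illustration):

```go
package main

import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"

func ensureServers(n int) {
	// Dies loudly on an impossible state instead of propagating an error.
	assert.True(n > 0, "no gRPC servers configured")
}
```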

@@ -125,7 +125,6 @@ const (
	SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
	SearchLocalOperationFailed = "local operation failed"
	UtilObjectServiceError = "object service error"
	UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
	V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
	V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
	ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"

@@ -253,6 +252,7 @@ const (
	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
+	ShardCouldNotFindObject = "could not find object"
	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"

@@ -513,4 +513,6 @@ const (
	FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
	NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
	FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
+	FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
+	WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
)
@@ -23,6 +23,7 @@ const (
	policerSubsystem     = "policer"
	commonCacheSubsystem = "common_cache"
	multinetSubsystem    = "multinet"
+	qosSubsystem         = "qos"

	successLabel = "success"
	shardIDLabel = "shard_id"

@@ -43,6 +44,7 @@ const (
	hitLabel      = "hit"
	cacheLabel    = "cache"
	sourceIPLabel = "source_ip"
+	ioTagLabel    = "io_tag"

	readWriteMode = "READ_WRITE"
	readOnlyMode  = "READ_ONLY"
@@ -26,6 +26,7 @@ type NodeMetrics struct {
	morphCache *morphCacheMetrics
	log        logger.LogMetrics
	multinet   *multinetMetrics
+	qos        *QoSMetrics
	// nolint: unused
	appInfo *ApplicationInfo
}

@@ -55,6 +56,7 @@ func NewNodeMetrics() *NodeMetrics {
		log:      logger.NewLogMetrics(namespace),
		appInfo:  NewApplicationInfo(misc.Version),
		multinet: newMultinetMetrics(namespace),
+		qos:      newQoSMetrics(),
	}
}

@@ -126,3 +128,7 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
	return m.multinet
}

+func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
+	return m.qos
+}
@@ -9,13 +9,14 @@ import (
)

type ObjectServiceMetrics interface {
-	AddRequestDuration(method string, d time.Duration, success bool)
+	AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
	AddPayloadSize(method string, size int)
}

type objectServiceMetrics struct {
-	methodDuration *prometheus.HistogramVec
-	payloadCounter *prometheus.CounterVec
+	methodDuration  *prometheus.HistogramVec
+	payloadCounter  *prometheus.CounterVec
+	ioTagOpsCounter *prometheus.CounterVec
}

func newObjectServiceMetrics() *objectServiceMetrics {

@@ -32,14 +33,24 @@ func newObjectServiceMetrics() *objectServiceMetrics {
			Name:      "request_payload_bytes",
			Help:      "Object Service request payload",
		}, []string{methodLabel}),
+		ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: objectSubsystem,
+			Name:      "requests_total",
+			Help:      "Count of requests for each IO tag",
+		}, []string{methodLabel, ioTagLabel}),
	}
}

-func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
	m.methodDuration.With(prometheus.Labels{
		methodLabel:  method,
		successLabel: strconv.FormatBool(success),
	}).Observe(d.Seconds())
+	m.ioTagOpsCounter.With(prometheus.Labels{
+		ioTagLabel:  ioTag,
+		methodLabel: method,
+	}).Inc()
}

func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
52 internal/metrics/qos.go Normal file

@@ -0,0 +1,52 @@
package metrics

import (
	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

type QoSMetrics struct {
	opsCounter *prometheus.GaugeVec
}

func newQoSMetrics() *QoSMetrics {
	return &QoSMetrics{
		opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: qosSubsystem,
			Name:      "operations_total",
			Help:      "Count of pending, in-progress, completed and failed (due to a resource exhausted error) operations for each shard",
		}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
	}
}

func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
	m.opsCounter.With(prometheus.Labels{
		shardIDLabel:   shardID,
		operationLabel: operation,
		ioTagLabel:     tag,
		typeLabel:      "pending",
	}).Set(float64(pending))
	m.opsCounter.With(prometheus.Labels{
		shardIDLabel:   shardID,
		operationLabel: operation,
		ioTagLabel:     tag,
		typeLabel:      "in_progress",
	}).Set(float64(inProgress))
	m.opsCounter.With(prometheus.Labels{
		shardIDLabel:   shardID,
		operationLabel: operation,
		ioTagLabel:     tag,
		typeLabel:      "completed",
	}).Set(float64(completed))
	m.opsCounter.With(prometheus.Labels{
		shardIDLabel:   shardID,
		operationLabel: operation,
		ioTagLabel:     tag,
		typeLabel:      "resource_exhausted",
	}).Set(float64(resourceExhausted))
}

func (m *QoSMetrics) Close(shardID string) {
	m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
}
@@ -12,12 +12,14 @@ type TreeMetricsRegister interface {
	AddReplicateTaskDuration(time.Duration, bool)
	AddReplicateWaitDuration(time.Duration, bool)
	AddSyncDuration(time.Duration, bool)
+	AddOperation(string, string)
}

type treeServiceMetrics struct {
	replicateTaskDuration *prometheus.HistogramVec
	replicateWaitDuration *prometheus.HistogramVec
	syncOpDuration        *prometheus.HistogramVec
+	ioTagOpsCounter       *prometheus.CounterVec
}

var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)

@@ -42,6 +44,12 @@ func newTreeServiceMetrics() *treeServiceMetrics {
			Name:      "sync_duration_seconds",
			Help:      "Duration of synchronization operations",
		}, []string{successLabel}),
+		ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: treeServiceSubsystem,
+			Name:      "requests_total",
+			Help:      "Count of requests for each IO tag",
+		}, []string{methodLabel, ioTagLabel}),
	}
}

@@ -62,3 +70,10 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
		successLabel: strconv.FormatBool(success),
	}).Observe(d.Seconds())
}

+func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
+	m.ioTagOpsCounter.With(prometheus.Labels{
+		ioTagLabel:  ioTag,
+		methodLabel: op,
+	}).Inc()
+}
@@ -3,7 +3,9 @@ package qos
import (
	"context"

+	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"google.golang.org/grpc"
)

@@ -24,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor
		if err != nil {
			tag = IOTagClient
		}
-		if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
+		if tag.IsLocal() {
			tag = IOTagInternal
		}
		ctx = tagging.ContextWithIOTag(ctx, tag.String())

@@ -42,10 +44,43 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto
		if err != nil {
			tag = IOTagClient
		}
-		if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
+		if tag.IsLocal() {
			tag = IOTagInternal
		}
		ctx = tagging.ContextWithIOTag(ctx, tag.String())
		return streamer(ctx, desc, cc, method, opts...)
	}
}

+func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
+	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
+		if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
+			return handler(ctx, req)
+		}
+
+		release, ok := getLimiter().Acquire(info.FullMethod)
+		if !ok {
+			return nil, new(apistatus.ResourceExhausted)
+		}
+		defer release()
+
+		return handler(ctx, req)
+	}
+}
+
+//nolint:contextcheck (grpc.ServerStream manages the context itself)
+func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
+	return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
+			return handler(srv, ss)
+		}
+
+		release, ok := getLimiter().Acquire(info.FullMethod)
+		if !ok {
+			return new(apistatus.ResourceExhausted)
+		}
+		defer release()
+
+		return handler(srv, ss)
+	}
+}
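A minimal sketch of how these interceptors attach to a server. The `getLimiter` callback indirection lets the node swap the limiter atomically on SIGHUP (matching the `c.cfgGRPC.limiter.Store(limiter)` call earlier); the `newServer` helper itself is illustrative, not part of the diff:

```go
package server

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
	"google.golang.org/grpc"
)

// newServer wires the limiter interceptors in; requests tagged "critical"
// bypass the limiter, everything else competes for the per-method quota
// and fails fast with ResourceExhausted when it is used up.
func newServer(getLimiter func() limiting.Limiter) *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter)),
		grpc.ChainStreamInterceptor(qos.NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter)),
	)
}
```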
236 internal/qos/limiter.go Normal file

@@ -0,0 +1,236 @@
package qos

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

const (
	defaultIdleTimeout time.Duration = 0
	defaultShare       float64       = 1.0
	minusOne                         = ^uint64(0)

	defaultMetricsCollectTimeout = 5 * time.Second
)

type ReleaseFunc scheduling.ReleaseFunc

type Limiter interface {
	ReadRequest(context.Context) (ReleaseFunc, error)
	WriteRequest(context.Context) (ReleaseFunc, error)
	SetParentID(string)
	SetMetrics(Metrics)
	Close()
}

type scheduler interface {
	RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
	Close()
}

func NewLimiter(c *limits.Config) (Limiter, error) {
	if err := validateConfig(c); err != nil {
		return nil, err
	}
	readScheduler, err := createScheduler(c.Read())
	if err != nil {
		return nil, fmt.Errorf("create read scheduler: %w", err)
	}
	writeScheduler, err := createScheduler(c.Write())
	if err != nil {
		return nil, fmt.Errorf("create write scheduler: %w", err)
	}
	l := &mClockLimiter{
		readScheduler:  readScheduler,
		writeScheduler: writeScheduler,
		closeCh:        make(chan struct{}),
		wg:             &sync.WaitGroup{},
		readStats:      createStats(),
		writeStats:     createStats(),
	}
	l.shardID.Store(&shardID{})
	l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
	l.startMetricsCollect()
	return l, nil
}

func createScheduler(config limits.OpConfig) (scheduler, error) {
	if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit {
		return newSemaphoreScheduler(config.MaxRunningOps), nil
	}
	return scheduling.NewMClock(
		uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
		converToSchedulingTags(config.Tags), config.IdleTimeout)
}

func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
	result := make(map[string]scheduling.TagInfo)
	for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
		result[tag.String()] = scheduling.TagInfo{
			Share: defaultShare,
		}
	}
	for _, l := range limits {
		v := result[l.Tag]
		if l.Weight != nil && *l.Weight != 0 {
			v.Share = *l.Weight
		}
		if l.LimitOps != nil && *l.LimitOps != 0 {
			v.LimitIOPS = l.LimitOps
		}
		if l.ReservedOps != nil && *l.ReservedOps != 0 {
			v.ReservedIOPS = l.ReservedOps
		}
		result[l.Tag] = v
	}
	return result
}

var (
	_           Limiter     = (*noopLimiter)(nil)
	releaseStub ReleaseFunc = func() {}

	noopLimiterInstance = &noopLimiter{}
)

func NewNoopLimiter() Limiter {
	return noopLimiterInstance
}

type noopLimiter struct{}

func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
	return releaseStub, nil
}

func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
	return releaseStub, nil
}

func (n *noopLimiter) SetParentID(string) {}

func (n *noopLimiter) Close() {}

func (n *noopLimiter) SetMetrics(Metrics) {}

var _ Limiter = (*mClockLimiter)(nil)

type shardID struct {
	id string
}

type mClockLimiter struct {
	readScheduler  scheduler
	writeScheduler scheduler

	readStats  map[string]*stat
	writeStats map[string]*stat

	shardID atomic.Pointer[shardID]
	metrics atomic.Pointer[metricsHolder]
	closeCh chan struct{}
	wg      *sync.WaitGroup
}

func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	return requestArrival(ctx, n.readScheduler, n.readStats)
}

func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
	return requestArrival(ctx, n.writeScheduler, n.writeStats)
}

func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
	tag, ok := tagging.IOTagFromContext(ctx)
	if !ok {
		tag = IOTagClient.String()
	}
	stat := getStat(tag, stats)
	stat.pending.Add(1)
	if tag == IOTagCritical.String() {
		stat.inProgress.Add(1)
		return func() {
			stat.completed.Add(1)
		}, nil
	}
	rel, err := s.RequestArrival(ctx, tag)
	stat.inProgress.Add(1)
	if err != nil {
		if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
			errors.Is(err, errSemaphoreLimitExceeded) {
			stat.resourceExhausted.Add(1)
			return nil, &apistatus.ResourceExhausted{}
		}
		stat.completed.Add(1)
		return nil, err
	}
	return func() {
		rel()
		stat.completed.Add(1)
	}, nil
}

func (n *mClockLimiter) Close() {
	n.readScheduler.Close()
	n.writeScheduler.Close()
	close(n.closeCh)
	n.wg.Wait()
	n.metrics.Load().metrics.Close(n.shardID.Load().id)
}

func (n *mClockLimiter) SetParentID(parentID string) {
	n.shardID.Store(&shardID{id: parentID})
}

func (n *mClockLimiter) SetMetrics(m Metrics) {
	n.metrics.Store(&metricsHolder{metrics: m})
}

func (n *mClockLimiter) startMetricsCollect() {
	n.wg.Add(1)
	go func() {
		defer n.wg.Done()

		ticker := time.NewTicker(defaultMetricsCollectTimeout)
		defer ticker.Stop()
		for {
			select {
			case <-n.closeCh:
				return
			case <-ticker.C:
				shardID := n.shardID.Load().id
				if shardID == "" {
					continue
				}
				metrics := n.metrics.Load().metrics
				exportMetrics(metrics, n.readStats, shardID, "read")
				exportMetrics(metrics, n.writeStats, shardID, "write")
			}
		}
	}()
}

func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
	var pending uint64
	var inProgress uint64
	var completed uint64
	var resExh uint64
	for tag, s := range stats {
		pending = s.pending.Load()
		inProgress = s.inProgress.Load()
		completed = s.completed.Load()
		resExh = s.resourceExhausted.Load()
		if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
			continue
		}
		metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
	}
}
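How a shard-side caller is expected to use the limiter (a sketch; the surrounding `readObject` function is hypothetical, only the `Limiter` interface is from the file above):

```go
package storage

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

// readObject sketches the intended call pattern: acquire, do work, release.
func readObject(ctx context.Context, lim qos.Limiter) error {
	release, err := lim.ReadRequest(ctx) // fails fast with ResourceExhausted under pressure
	if err != nil {
		return err
	}
	defer release() // moves the op from in_progress to completed in the stats

	// ... perform the actual shard read here ...
	return nil
}
```

Note the fast path in `createScheduler`: when no tags are configured and there is no waiting-queue limit, a plain semaphore replaces the mClock scheduler, avoiding its bookkeeping overhead entirely.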
31 internal/qos/metrics.go Normal file

@@ -0,0 +1,31 @@
package qos

import "sync/atomic"

type Metrics interface {
	SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
	Close(shardID string)
}

var _ Metrics = (*noopMetrics)(nil)

type noopMetrics struct{}

func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
}

func (n *noopMetrics) Close(string) {}

// stat presents limiter statistics cumulative counters.
//
// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
type stat struct {
	completed         atomic.Uint64
	pending           atomic.Uint64
	resourceExhausted atomic.Uint64
	inProgress        atomic.Uint64
}

type metricsHolder struct {
	metrics Metrics
}
39 internal/qos/semaphore.go Normal file

@@ -0,0 +1,39 @@
package qos

import (
	"context"
	"errors"

	qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
)

var (
	_                         scheduler = (*semaphore)(nil)
	errSemaphoreLimitExceeded           = errors.New("semaphore limit exceeded")
)

type semaphore struct {
	s *qosSemaphore.Semaphore
}

func newSemaphoreScheduler(size int64) *semaphore {
	return &semaphore{
		s: qosSemaphore.NewSemaphore(size),
	}
}

func (s *semaphore) Close() {}

func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	if s.s.Acquire() {
		return s.s.Release, nil
	}
	return nil, errSemaphoreLimitExceeded
}
29 internal/qos/stats.go Normal file

@@ -0,0 +1,29 @@
package qos

const unknownStatsTag = "unknown"

var statTags = map[string]struct{}{
	IOTagBackground.String(): {},
	IOTagClient.String():     {},
	IOTagCritical.String():   {},
	IOTagInternal.String():   {},
	IOTagPolicer.String():    {},
	IOTagTreeSync.String():   {},
	IOTagWritecache.String(): {},
	unknownStatsTag:          {},
}

func createStats() map[string]*stat {
	result := make(map[string]*stat)
	for tag := range statTags {
		result[tag] = &stat{}
	}
	return result
}

func getStat(tag string, stats map[string]*stat) *stat {
	if v, ok := stats[tag]; ok {
		return v
	}
	return stats[unknownStatsTag]
}
@@ -1,34 +1,42 @@
package qos

-import "fmt"
+import (
+	"context"
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+)

type IOTag string

const (
-	IOTagClient     IOTag = "client"
-	IOTagInternal   IOTag = "internal"
	IOTagBackground IOTag = "background"
-	IOTagWritecache IOTag = "writecache"
-	IOTagPolicer    IOTag = "policer"
+	IOTagClient     IOTag = "client"
+	IOTagCritical   IOTag = "critical"
+	IOTagInternal   IOTag = "internal"
+	IOTagPolicer    IOTag = "policer"
+	IOTagTreeSync   IOTag = "treesync"
+	IOTagWritecache IOTag = "writecache"

	ioTagUnknown IOTag = ""
)

func FromRawString(s string) (IOTag, error) {
	switch s {
-	case string(IOTagCritical):
-		return IOTagCritical, nil
-	case string(IOTagClient):
-		return IOTagClient, nil
-	case string(IOTagInternal):
-		return IOTagInternal, nil
	case string(IOTagBackground):
		return IOTagBackground, nil
-	case string(IOTagWritecache):
-		return IOTagWritecache, nil
+	case string(IOTagClient):
+		return IOTagClient, nil
+	case string(IOTagCritical):
+		return IOTagCritical, nil
+	case string(IOTagInternal):
+		return IOTagInternal, nil
+	case string(IOTagPolicer):
+		return IOTagPolicer, nil
+	case string(IOTagTreeSync):
+		return IOTagTreeSync, nil
+	case string(IOTagWritecache):
+		return IOTagWritecache, nil
	default:
		return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
	}

@@ -37,3 +45,15 @@ func FromRawString(s string) (IOTag, error) {
func (t IOTag) String() string {
	return string(t)
}

+func IOTagFromContext(ctx context.Context) string {
+	tag, ok := tagging.IOTagFromContext(ctx)
+	if !ok {
+		tag = "undefined"
+	}
+	return tag
+}
+
+func (t IOTag) IsLocal() bool {
+	return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
+}
|
||||
|
|
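A reading aid, outside the diff: IOTagFromContext defaults to "undefined" when no tag was attached to the context. The sketch below models the tagging package with a plain context.WithValue stand-in; tagKey and ContextWithIOTag are assumptions for this example, not necessarily the real frostfs-qos API.

package main

import (
	"context"
	"fmt"
)

// Stand-in for the tagging package: a private key type plus WithValue
// models storing the IO tag in the request context.
type tagKey struct{}

func ContextWithIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, tagKey{}, tag)
}

func IOTagFromContext(ctx context.Context) (string, bool) {
	tag, ok := ctx.Value(tagKey{}).(string)
	return tag, ok
}

// Mirrors qos.IOTagFromContext from the diff: an absent tag -> "undefined".
func ioTagOrUndefined(ctx context.Context) string {
	tag, ok := IOTagFromContext(ctx)
	if !ok {
		tag = "undefined"
	}
	return tag
}

func main() {
	fmt.Println(ioTagOrUndefined(context.Background()))                             // undefined
	fmt.Println(ioTagOrUndefined(ContextWithIOTag(context.Background(), "client"))) // client
}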
93 internal/qos/validate.go Normal file
@ -0,0 +1,93 @@
package qos

import (
	"errors"
	"fmt"
	"math"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
)

var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")

type tagConfig struct {
	Shares, Limit, Reserved *float64
}

func validateConfig(c *limits.Config) error {
	if err := validateOpConfig(c.Read()); err != nil {
		return fmt.Errorf("limits 'read' section validation error: %w", err)
	}
	if err := validateOpConfig(c.Write()); err != nil {
		return fmt.Errorf("limits 'write' section validation error: %w", err)
	}
	return nil
}

func validateOpConfig(c limits.OpConfig) error {
	if c.MaxRunningOps <= 0 {
		return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
	}
	if c.MaxWaitingOps <= 0 {
		return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps)
	}
	if c.IdleTimeout <= 0 {
		return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String())
	}
	if err := validateTags(c.Tags); err != nil {
		return fmt.Errorf("'tags' config section validation error: %w", err)
	}
	return nil
}

func validateTags(configTags []limits.IOTagConfig) error {
	tags := map[IOTag]tagConfig{
		IOTagBackground: {},
		IOTagClient:     {},
		IOTagInternal:   {},
		IOTagPolicer:    {},
		IOTagTreeSync:   {},
		IOTagWritecache: {},
	}
	for _, t := range configTags {
		tag, err := FromRawString(t.Tag)
		if err != nil {
			return fmt.Errorf("invalid tag %s: %w", t.Tag, err)
		}
		if _, ok := tags[tag]; !ok {
			return fmt.Errorf("tag %s is not configurable", t.Tag)
		}
		tags[tag] = tagConfig{
			Shares:   t.Weight,
			Limit:    t.LimitOps,
			Reserved: t.ReservedOps,
		}
	}
	idx := 0
	var shares float64
	for t, v := range tags {
		if idx == 0 {
			idx++
			shares = float64Value(v.Shares)
		} else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) {
			return errWeightsMustBeSpecified
		}
		if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) {
			return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String())
		}
		if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) {
			return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String())
		}
		if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) {
			return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String())
		}
	}
	return nil
}

func float64Value(f *float64) float64 {
	if f == nil {
		return 0.0
	}
	return *f
}
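An illustration, outside the diff, of the all-or-none weights rule enforced by validateTags: the first tag seen fixes whether weights are in use, and every other tag must agree. checkWeights below is a simplified stand-in operating on a plain map; the result is the same regardless of map iteration order.

package main

import (
	"errors"
	"fmt"
	"math"
)

var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")

// checkWeights condenses the idx == 0 trick from validateTags: the first
// iteration records whether weights are in use, later ones must match.
func checkWeights(shares map[string]float64) error {
	first := true
	var inUse bool
	for tag, s := range shares {
		if s < 0 || math.IsNaN(s) {
			return fmt.Errorf("invalid weight for tag %s: must be positive value", tag)
		}
		if first {
			first = false
			inUse = s != 0
		} else if inUse != (s != 0) {
			return errWeightsMustBeSpecified
		}
	}
	return nil
}

func main() {
	fmt.Println(checkWeights(map[string]float64{"client": 10, "policer": 5})) // <nil>
	fmt.Println(checkWeights(map[string]float64{"client": 10, "policer": 0})) // weights error
	fmt.Println(checkWeights(map[string]float64{"client": 0, "policer": 0}))  // <nil>
}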
@ -209,7 +209,7 @@ func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr container
 		return fmt.Errorf("could not get setting in contract: %w", err)
 	}

-	if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting {
+	if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting {
 		return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting)
 	}

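A note on the change above: the old condition rejected any mismatch between the network and container settings, while the new one only rejects a container that keeps homomorphic hashing enabled when the network has disabled it; a container that disables hashing on an enabled network now passes. A self-contained truth table (both flags follow the "disabled" convention of IsHomomorphicHashingDisabled):

package main

import "fmt"

func main() {
	for _, netDisabled := range []bool{false, true} {
		for _, cnrDisabled := range []bool{false, true} {
			oldReject := netDisabled != cnrDisabled // any mismatch
			newReject := netDisabled && !cnrDisabled // only net-off, container-on
			fmt.Printf("net_disabled=%-5v cnr_disabled=%-5v old_reject=%-5v new_reject=%v\n",
				netDisabled, cnrDisabled, oldReject, newReject)
		}
	}
}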
@ -50,7 +50,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
 	var res common.RebuildRes

 	b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
-	completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
+	completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter)
 	res.ObjectsMoved += completedPreviosMoves
 	if err != nil {
 		b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
@ -79,7 +79,7 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.
 	var completedDBCount uint32
 	for _, db := range dbs {
 		b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
-		movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
+		movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter)
 		res.ObjectsMoved += movedObjects
 		if err != nil {
 			b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
@ -195,7 +195,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil
 	return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
 }

-func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
 	shDB := b.getBlobovnicza(ctx, path)
 	blz, err := shDB.Open(ctx)
 	if err != nil {
@ -212,7 +212,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
 	if err != nil {
 		return 0, err
 	}
-	migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter)
+	migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
 	if err != nil {
 		return migratedObjects, err
 	}
@ -238,7 +238,7 @@ func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (fun
 	}, nil
 }

-func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
 	var result atomic.Uint64
 	batch := make(map[oid.Address][]byte)

@ -253,7 +253,12 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 	})

 	for {
-		_, err := blz.Iterate(ctx, prm)
+		release, err := limiter.ReadRequest(ctx)
+		if err != nil {
+			return result.Load(), err
+		}
+		_, err = blz.Iterate(ctx, prm)
+		release()
 		if err != nil && !errors.Is(err, errBatchFull) {
 			return result.Load(), err
 		}
@ -265,13 +270,19 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 	eg, egCtx := errgroup.WithContext(ctx)

 	for addr, data := range batch {
-		if err := limiter.AcquireWorkSlot(egCtx); err != nil {
+		release, err := limiter.AcquireWorkSlot(egCtx)
+		if err != nil {
 			_ = eg.Wait()
 			return result.Load(), err
 		}
 		eg.Go(func() error {
-			defer limiter.ReleaseWorkSlot()
-			err := b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+			defer release()
+			moveRelease, err := limiter.WriteRequest(ctx)
+			if err != nil {
+				return err
+			}
+			err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+			moveRelease()
 			if err == nil {
 				result.Add(1)
 			}
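For orientation, outside the diff: the rebuild path now acquires a work slot before spawning each goroutine and releases it from inside that goroutine, so a failed acquire stops scheduling new work while in-flight moves drain via eg.Wait(). A self-contained sketch under those assumptions; slotLimiter is a hypothetical stand-in for common.RebuildLimiter, not the frostfs implementation.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// slotLimiter hands out work slots; every successful acquire returns its own
// release func, so the slot can be freed by whichever goroutine used it.
type slotLimiter struct{ slots chan struct{} }

func newSlotLimiter(n int) *slotLimiter {
	return &slotLimiter{slots: make(chan struct{}, n)}
}

func (l *slotLimiter) AcquireWorkSlot(ctx context.Context) (func(), error) {
	select {
	case l.slots <- struct{}{}:
		return func() { <-l.slots }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	limiter := newSlotLimiter(2) // at most 2 concurrent "moves"
	eg, egCtx := errgroup.WithContext(context.Background())
	var moved atomic.Uint64

	for i := 0; i < 5; i++ {
		// Acquire outside eg.Go so a failed acquire stops scheduling work.
		release, err := limiter.AcquireWorkSlot(egCtx)
		if err != nil {
			_ = eg.Wait() // let in-flight work drain before returning
			fmt.Println(err)
			return
		}
		eg.Go(func() error {
			defer release() // slot is held exactly as long as the work runs
			moved.Add(1)
			return nil
		})
	}
	_ = eg.Wait()
	fmt.Println("moved:", moved.Load()) // moved: 5
}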
@ -359,7 +370,7 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
 	return b.dropDirectoryIfEmpty(filepath.Dir(path))
 }

-func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
+func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
 	var count uint64
 	var rebuildTempFilesToRemove []string
 	err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
@ -372,13 +383,24 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 		}
 		defer shDB.Close(ctx)

+		release, err := rateLimiter.ReadRequest(ctx)
+		if err != nil {
+			return false, err
+		}
 		incompletedMoves, err := blz.ListMoveInfo(ctx)
+		release()
 		if err != nil {
 			return true, err
 		}

 		for _, move := range incompletedMoves {
-			if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil {
+			release, err := rateLimiter.WriteRequest(ctx)
+			if err != nil {
+				return false, err
+			}
+			err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
+			release()
+			if err != nil {
 				return true, err
 			}
 			count++
@ -388,9 +410,14 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 		return false, nil
 	})
 	for _, tmp := range rebuildTempFilesToRemove {
+		release, err := rateLimiter.WriteRequest(ctx)
+		if err != nil {
+			return count, err
+		}
 		if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
 			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
+		release()
 	}
 	return count, err
 }
@ -161,16 +161,18 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
 		storageIDs: make(map[oid.Address][]byte),
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-		MetaStorage:   metaStub,
-		WorkerLimiter: &rebuildLimiterStub{},
-		FillPercent:   1,
+		MetaStorage: metaStub,
+		Limiter:     limiter,
+		FillPercent: 1,
 	})
 	require.NoError(t, err)
 	require.Equal(t, uint64(1), rRes.ObjectsMoved)
 	require.Equal(t, uint64(0), rRes.FilesRemoved)

 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())

 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
 	require.NoError(t, blz.Open(context.Background()))