Compare commits: master...remove-dan (7 commits)

Commits (SHA1):
- d63ff20078
- cee94aae33
- e119f35827
- ab1362c297
- 24709df702
- 3cbd8be700
- efec26b8ef

242 changed files with 4641 additions and 4993 deletions
.ci/Jenkinsfile (vendored), 83 lines deleted
@@ -1,83 +0,0 @@
-def golang = ['1.23', '1.24']
-def golangDefault = "golang:${golang.last()}"
-
-async {
-
-    for (version in golang) {
-        def go = version
-
-        task("test/go${go}") {
-            container("golang:${go}") {
-                sh 'make test'
-            }
-        }
-
-        task("build/go${go}") {
-            container("golang:${go}") {
-                for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
-                    sh """
-                        make bin/frostfs-${app}
-                        bin/frostfs-${app} --version
-                    """
-                }
-            }
-        }
-    }
-
-    task('test/race') {
-        container(golangDefault) {
-            sh 'make test GOFLAGS="-count=1 -race"'
-        }
-    }
-
-    task('lint') {
-        container(golangDefault) {
-            sh 'make lint-install lint'
-        }
-    }
-
-    task('staticcheck') {
-        container(golangDefault) {
-            sh 'make staticcheck-install staticcheck-run'
-        }
-    }
-
-    task('gopls') {
-        container(golangDefault) {
-            sh 'make gopls-install gopls-run'
-        }
-    }
-
-    task('gofumpt') {
-        container(golangDefault) {
-            sh '''
-                make fumpt-install
-                make fumpt
-                git diff --exit-code --quiet
-            '''
-        }
-    }
-
-    task('vulncheck') {
-        container(golangDefault) {
-            sh '''
-                go install golang.org/x/vuln/cmd/govulncheck@latest
-                govulncheck ./...
-            '''
-        }
-    }
-
-    task('pre-commit') {
-        dockerfile("""
-            FROM ${golangDefault}
-            RUN apt update && \
-                apt install -y --no-install-recommends pre-commit
-        """) {
-            withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
-                sh 'pre-commit run --color=always --hook-stage=manual --all-files'
-            }
-        }
-    }
-}
-
-// TODO: dco check
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]

     steps:
       - uses: actions/checkout@v3

@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.22'

       - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

@@ -21,7 +21,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.24
+          go-version: 1.23
       - name: Set up Python
        run: |
          apt update

@@ -16,7 +16,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true

       - name: Install linters

@@ -30,7 +30,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3

@@ -53,7 +53,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.22'
          cache: true

       - name: Run tests

@@ -68,7 +68,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true

       - name: Install staticcheck

@@ -104,7 +104,7 @@ jobs:
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          cache: true

       - name: Install gofumpt

@@ -18,7 +18,7 @@ jobs:
       - name: Setup Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.24'
+          go-version: '1.23'
          check-latest: true

       - name: Install govulncheck
@@ -22,11 +22,6 @@ linters-settings:
     # 'default' case is present, even if all enum members aren't listed in the
     # switch
     default-signifies-exhaustive: true
-  gci:
-    sections:
-      - standard
-      - default
-    custom-order: true
   govet:
     # report about shadowed variables
     check-shadowing: false

@@ -77,7 +72,6 @@ linters:
   - durationcheck
   - exhaustive
   - copyloopvar
-  - gci
   - gofmt
   - goimports
   - misspell
Makefile, 42 lines changed
@@ -1,6 +1,5 @@
 #!/usr/bin/make -f
 SHELL = bash
-.SHELLFLAGS = -euo pipefail -c

 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")

@@ -8,7 +7,7 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

-GO_VERSION ?= 1.23
+GO_VERSION ?= 1.22
 LINT_VERSION ?= 1.62.2
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
 PROTOC_VERSION ?= 25.0

@@ -17,7 +16,7 @@ PROTOC_OS_VERSION=osx-x86_64
 ifeq ($(shell uname), Linux)
   PROTOC_OS_VERSION=linux-x86_64
 endif
-STATICCHECK_VERSION ?= 2025.1.1
+STATICCHECK_VERSION ?= 2024.1.1
 ARCH = amd64

 BIN = bin

@@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0
 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
 GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)

-GOPLS_VERSION ?= v0.17.1
+GOPLS_VERSION ?= v0.15.1
 GOPLS_DIR ?= $(abspath $(BIN))/gopls
 GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
 GOPLS_TEMP_FILE := $(shell mktemp)

@@ -116,7 +115,7 @@ protoc:
 # Install protoc
 protoc-install:
 	@rm -rf $(PROTOBUF_DIR)
-	@mkdir -p $(PROTOBUF_DIR)
+	@mkdir $(PROTOBUF_DIR)
 	@echo "⇒ Installing protoc... "
 	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
 	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)

@@ -170,7 +169,7 @@ imports:
 # Install gofumpt
 fumpt-install:
 	@rm -rf $(GOFUMPT_DIR)
-	@mkdir -p $(GOFUMPT_DIR)
+	@mkdir $(GOFUMPT_DIR)
 	@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)

 # Run gofumpt

@@ -187,37 +186,14 @@ test:
 	@echo "⇒ Running go test"
 	@GOFLAGS="$(GOFLAGS)" go test ./...

-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
-	@git config remote.review.url \
-		|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
-	@mkdir -p $(GIT_HOOK_DIR)/
-	@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/commit-msg
-	@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
-	@git push review HEAD:refs/for/$(BRANCH) \
-		--push-option r=e.stratonikov@yadro.com \
-		--push-option r=d.stepanov@yadro.com \
-		--push-option r=an.nikiforov@yadro.com \
-		--push-option r=a.arifullin@yadro.com \
-		--push-option r=ekaterina.lebedeva@yadro.com \
-		--push-option r=a.savchuk@yadro.com \
-		--push-option r=a.chuprov@yadro.com
-
 # Run pre-commit
 pre-commit-run:
 	@pre-commit run -a --hook-stage manual

 # Install linters
-lint-install: $(BIN)
+lint-install:
 	@rm -rf $(OUTPUT_LINT_DIR)
-	@mkdir -p $(OUTPUT_LINT_DIR)
+	@mkdir $(OUTPUT_LINT_DIR)
 	@mkdir -p $(TMP_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters

@@ -236,7 +212,7 @@ lint:
 # Install staticcheck
 staticcheck-install:
 	@rm -rf $(STATICCHECK_DIR)
-	@mkdir -p $(STATICCHECK_DIR)
+	@mkdir $(STATICCHECK_DIR)
 	@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)

 # Run staticcheck

@@ -249,7 +225,7 @@ staticcheck-run:
 # Install gopls
 gopls-install:
 	@rm -rf $(GOPLS_DIR)
-	@mkdir -p $(GOPLS_DIR)
+	@mkdir $(GOPLS_DIR)
 	@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)

 # Run gopls
@@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
 			nbuf := make([]byte, 8)
 			copy(nbuf[:], v)
 			n := binary.LittleEndian.Uint64(nbuf)
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
+			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
 		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
 			if len(v) == 0 || len(v) > 1 {
 				return helper.InvalidConfigValueErr(k)
 			}
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
+			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
 		default:
-			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
+			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
 		}
 	}
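The two sides of this hunk are behaviorally identical: fmt.Appendf (standard library since Go 1.19) formats straight into a byte slice, while the []byte(fmt.Sprintf(...)) spelling allocates an intermediate string first. A minimal, self-contained sketch of the equivalence (the key and value below are made up for illustration):

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	tw := tabwriter.NewWriter(os.Stdout, 0, 2, 2, ' ', 0)
	// Go 1.19+: append formatted output to a (nil) byte slice, no string allocation.
	_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", "MaxObjectSize", 67108864))
	// Equivalent pre-1.19 spelling: format to a string, then convert to []byte.
	_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", "MaxObjectSize", 67108864)))
	_ = tw.Flush()
}

Both Write calls emit the same bytes; the choice is purely an allocation trade-off.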
@@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
 		if info.version == "" {
 			info.version = "unknown"
 		}
-		_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
-			info.name, info.version, info.hash.StringLE()))
+		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
+			info.name, info.version, info.hash.StringLE())))
 	}
 	_ = tw.Flush()
@@ -34,7 +34,7 @@ const (
 	subjectNameFlag    = "subject-name"
 	subjectKeyFlag     = "subject-key"
 	subjectAddressFlag = "subject-address"
-	extendedFlag       = "extended"
+	includeNamesFlag   = "include-names"
 	groupNameFlag      = "group-name"
 	groupIDFlag        = "group-id"

@@ -209,7 +209,7 @@ func initFrostfsIDListSubjectsCmd() {
 	Cmd.AddCommand(frostfsidListSubjectsCmd)
 	frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
-	frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+	frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }

 func initFrostfsIDCreateGroupCmd() {

@@ -256,7 +256,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
 	frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
-	frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }

 func initFrostfsIDSetKVCmd() {

@@ -336,7 +336,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
 }

 func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
-	extended, _ := cmd.Flags().GetBool(extendedFlag)
+	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
 	ns := getFrostfsIDNamespace(cmd)
 	inv, _, hash := initInvoker(cmd)
 	reader := frostfsidrpclient.NewReader(inv, hash)
@@ -349,19 +349,21 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })

 	for _, addr := range subAddresses {
-		if !extended {
+		if !includeNames {
 			cmd.Println(address.Uint160ToString(addr))
 			continue
 		}

-		items, err := reader.GetSubject(addr)
+		sessionID, it, err := reader.ListSubjects()
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)

+		items, err := readIterator(inv, &it, sessionID)
+		commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
+
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)

-		printSubjectInfo(cmd, addr, subj)
-		cmd.Println()
+		cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
 	}
 }
@@ -481,7 +483,7 @@ func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
 func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	ns := getFrostfsIDNamespace(cmd)
 	groupID := getFrostfsIDGroupID(cmd)
-	extended, _ := cmd.Flags().GetBool(extendedFlag)
+	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
 	inv, cs, hash := initInvoker(cmd)
 	_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
 	commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)

@@ -499,7 +501,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })

 	for _, subjAddr := range subjects {
-		if !extended {
+		if !includeNames {
 			cmd.Println(address.Uint160ToString(subjAddr))
 			continue
 		}

@@ -508,8 +510,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
-		printSubjectInfo(cmd, subjAddr, subj)
-		cmd.Println()
+		cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
 	}
 }
@@ -599,30 +600,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {

 	return inv, cs, nmHash
 }
-
-func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
-	cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
-	pk := "<nil>"
-	if subj.PrimaryKey != nil {
-		pk = subj.PrimaryKey.String()
-	}
-	cmd.Printf("Primary key: %s\n", pk)
-	cmd.Printf("Name: %s\n", subj.Name)
-	cmd.Printf("Namespace: %s\n", subj.Namespace)
-	if len(subj.AdditionalKeys) > 0 {
-		cmd.Printf("Additional keys:\n")
-		for _, key := range subj.AdditionalKeys {
-			k := "<nil>"
-			if key != nil {
-				k = key.String()
-			}
-			cmd.Printf("- %s\n", k)
-		}
-	}
-	if len(subj.KV) > 0 {
-		cmd.Printf("KV:\n")
-		for k, v := range subj.KV {
-			cmd.Printf("- %s: %s\n", k, v)
-		}
-	}
-}
@@ -6,7 +6,6 @@ import (
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
-	nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"

@@ -14,7 +13,9 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+	nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
 	"github.com/nspcc-dev/neo-go/pkg/util"

@@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
 }

 func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
-	inv := invoker.New(c, nil)
-	reader := nns2.NewReader(inv, nnsHash)
-	return reader.IsAvailable(name)
+	switch c.(type) {
+	case *rpcclient.Client:
+		inv := invoker.New(c, nil)
+		reader := nns2.NewReader(inv, nnsHash)
+		return reader.IsAvailable(name)
+	default:
+		b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
+		if err != nil {
+			return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
+		}
+
+		return b, nil
+	}
 }

 func CheckNotaryEnabled(c Client) error {
@@ -40,8 +40,6 @@ type ClientContext struct {
 	CommitteeAct    *actor.Actor     // committee actor with the Global witness scope
 	ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer
 	SentTxs         []HashVUBPair
-
-	AwaitDisabled bool
 }

 func NewRemoteClient(v *viper.Viper) (Client, error) {

@@ -122,7 +120,7 @@ func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command,
 }

 func (c *ClientContext) AwaitTx(cmd *cobra.Command) error {
-	if len(c.SentTxs) == 0 || c.AwaitDisabled {
+	if len(c.SentTxs) == 0 {
 		return nil
 	}
@@ -3,7 +3,6 @@ package helper
 import (
 	"errors"
 	"fmt"
-	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"

@@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
 		return err
 	}
 	for k, v := range m {
-		if slices.Contains(NetmapConfigKeys, k) {
-			md[k] = v
+		for _, key := range NetmapConfigKeys {
+			if k == key {
+				md[k] = v
+				break
+			}
 		}
 	}
 	return nil
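For reference, slices.Contains (standard library since Go 1.21) and the expanded for/if/break loop on the other side of this hunk perform the same membership test. A small runnable sketch, using a hypothetical key set in place of NetmapConfigKeys:

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Hypothetical config keys; the real set lives in NetmapConfigKeys.
	keys := []string{"MaxObjectSize", "EpochDuration"}
	md := map[string]any{}
	m := map[string]any{"MaxObjectSize": 67108864, "Unrelated": true}

	for k, v := range m {
		if slices.Contains(keys, k) { // one call replaces the manual loop
			md[k] = v
		}
	}
	fmt.Println(md) // map[MaxObjectSize:67108864]
}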
@@ -39,7 +39,6 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
 		return err
 	}

-	initCtx.AwaitDisabled = true
 	cmd.Println("Stage 4.1: Transfer GAS to proxy contract.")
 	if err := transferGASToProxy(initCtx); err != nil {
 		return err

@@ -56,10 +55,5 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
 	}

 	cmd.Println("Stage 7: set addresses in NNS.")
-	if err := setNNS(initCtx); err != nil {
-		return err
-	}
-
-	initCtx.AwaitDisabled = false
-	return initCtx.AwaitTx()
+	return setNNS(initCtx)
 }
@@ -1,6 +1,7 @@
 package initialize

 import (
+	"errors"
 	"fmt"
 	"math/big"

@@ -10,8 +11,11 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
 	"github.com/nspcc-dev/neo-go/pkg/io"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
 	"github.com/nspcc-dev/neo-go/pkg/util"

@@ -26,8 +30,7 @@ const (
 )

 func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
-	reader := neo.NewReader(c.ReadOnlyInvoker)
-	regPrice, err := reader.GetRegisterPrice()
+	regPrice, err := getCandidateRegisterPrice(c)
 	if err != nil {
 		return fmt.Errorf("can't fetch registration price: %w", err)
 	}

@@ -113,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
 func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
 	neoHash := neo.Hash

-	ok, err := transferNEOFinished(c)
+	ok, err := transferNEOFinished(c, neoHash)
 	if ok || err != nil {
 		return err
 	}
@@ -136,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
 	return c.AwaitTx()
 }

-func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
-	r := neo.NewReader(c.ReadOnlyInvoker)
+func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
+	r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
 	bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
 	return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
 }
+
+var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
+
+func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
+	switch c.Client.(type) {
+	case *rpcclient.Client:
+		inv := invoker.New(c.Client, nil)
+		reader := neo.NewReader(inv)
+		return reader.GetRegisterPrice()
+	default:
+		neoHash := neo.Hash
+		res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
+		if err != nil {
+			return 0, err
+		}
+		if len(res.Stack) == 0 {
+			return 0, errGetPriceInvalid
+		}
+		bi, err := res.Stack[0].TryInteger()
+		if err != nil || !bi.IsInt64() {
+			return 0, errGetPriceInvalid
+		}
+		return bi.Int64(), nil
+	}
+}
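The manual stack inspection in the default branch mirrors what neo-go's rpcclient/unwrap helpers do. Assuming helper.InvokeFunction returns (*result.Invoke, error), as the unwrap.Bool call in NNSIsAvailable suggests, the fallback could plausibly be compressed to a sketch like the following; the function name and the invoke parameter are hypothetical stand-ins, not code from this change:

package initialize

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
)

// getRegisterPriceViaUnwrap is a hypothetical variant of the default branch;
// invoke stands in for the helper.InvokeFunction call made above.
func getRegisterPriceViaUnwrap(invoke func() (*result.Invoke, error)) (int64, error) {
	// unwrap.Int64 checks the VM state and converts the top stack item.
	price, err := unwrap.Int64(invoke())
	if err != nil {
		return 0, fmt.Errorf("`getRegisterPrice`: invalid response: %w", err)
	}
	return price, nil
}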
@@ -22,14 +22,15 @@ import (
 )

 const (
+	gasInitialTotalSupply = 30000000 * native.GASFactor
 	// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
 	initialAlphabetGASAmount = 10_000 * native.GASFactor
 	// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
 	initialProxyGASAmount = 50_000 * native.GASFactor
 )

-func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
-	return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
+func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
+	return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
 }

 func transferFunds(c *helper.InitializeContext) error {

@@ -41,11 +42,6 @@ func transferFunds(c *helper.InitializeContext) error {
 		return err
 	}

-	version, err := c.Client.GetVersion()
-	if err != nil {
-		return err
-	}
-
 	var transfers []transferTarget
 	for _, acc := range c.Accounts {
 		to := acc.Contract.ScriptHash()

@@ -63,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
 		transferTarget{
 			Token:   gas.Hash,
 			Address: c.CommitteeAcc.Contract.ScriptHash(),
-			Amount:  initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
+			Amount:  initialCommitteeGASAmount(c),
 		},
 		transferTarget{
 			Token: neo.Hash,

@@ -87,23 +83,16 @@ func transferFunds(c *helper.InitializeContext) error {
 // transferFundsFinished checks balances of accounts we transfer GAS to.
 // The stage is considered finished if the balance is greater than the half of what we need to transfer.
 func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
-	r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
-	res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
-	if err != nil {
-		return false, err
-	}
-
-	version, err := c.Client.GetVersion()
-	if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
+	acc := c.Accounts[0]
+
+	r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
+	res, err := r.BalanceOf(acc.Contract.ScriptHash())
+	if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
 		return false, err
 	}

 	res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
-	if err != nil {
-		return false, err
-	}
-
-	return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
+	return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
 }
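The balance checks above lean on math/big's Cmp, which returns -1, 0, or 1; res.Cmp(x) == 1 therefore means strictly greater than x, and != 1 means less than or equal. A quick standalone illustration (the numbers are arbitrary stand-ins for the GAS amounts used here):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	half := big.NewInt(10_000 / 2) // stand-in for initialAlphabetGASAmount / 2
	bal := big.NewInt(7_000)

	fmt.Println(bal.Cmp(half)) // 1: bal > half, so the transfer stage counts as finished
	fmt.Println(half.Cmp(bal)) // -1: half < bal
	fmt.Println(bal.Cmp(bal))  // 0: equal; Cmp == 1 is false, so not finished
}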
@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
 	buf := bytes.NewBuffer(nil)
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

-	_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
-	_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
-	_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))

 	_ = tw.Flush()
 	cmd.Print(buf.String())
@@ -7,6 +7,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
 	utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"

@@ -40,6 +41,7 @@ func init() {

 	rootCmd.AddCommand(config.RootCmd)
 	rootCmd.AddCommand(morph.RootCmd)
+	rootCmd.AddCommand(storagecfg.RootCmd)
 	rootCmd.AddCommand(metabase.RootCmd)

 	rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
cmd/frostfs-adm/internal/modules/storagecfg/config.go (new file, 137 lines)
@@ -0,0 +1,137 @@
+package storagecfg
+
+const configTemplate = `logger:
+  level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
+
+node:
+  wallet:
+    path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
+    address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
+    password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
+  addresses: # list of addresses announced by Storage node in the Network map
+    - {{ .AnnouncedAddress }}
+  attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
+  relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
+
+grpc:
+  num: 1 # total number of listener endpoints
+  0:
+    endpoint: {{ .Endpoint }} # endpoint for gRPC server
+    tls:{{if .TLSCert}}
+      enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
+      certificate: {{ .TLSCert }} # path to TLS certificate
+      key: {{ .TLSKey }} # path to TLS key
+    {{- else }}
+      enabled: false # disable TLS for a gRPC connection
+    {{- end}}
+
+control:
+  authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
+    {{- range .AuthorizedKeys }}
+    - {{.}}{{end}}
+  grpc:
+    endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
+
+morph:
+  dial_timeout: 20s # timeout for side chain NEO RPC client connection
+  cache_ttl: 15s # use TTL cache for side chain GET operations
+  rpc_endpoint: # side chain N3 RPC endpoints
+    {{- range .MorphRPC }}
+    - address: wss://{{.}}/ws{{end}}
+{{if not .Relay }}
+storage:
+  shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
+
+  shard:
+    default: # section with the default shard parameters
+      metabase:
+        perm: 0644 # permissions for metabase files(directories: +x for current user and group)
+
+      blobstor:
+        perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
+        depth: 2 # max depth of object tree storage in FS
+        small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
+        compress: true # turn on/off Zstandard compression (level 3) of stored objects
+        compression_exclude_content_types:
+          - audio/*
+          - video/*
+
+        blobovnicza:
+          size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
+          depth: 1 # max depth of object tree storage in key-value DB
+          width: 4 # max width of object tree storage in key-value DB
+          opened_cache_capacity: 50 # maximum number of opened database files
+          opened_cache_ttl: 5m # ttl for opened database file
+          opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
+
+      gc:
+        remover_batch_size: 200 # number of objects to be removed by the garbage collector
+        remover_sleep_interval: 5m # frequency of the garbage collector invocation
+    0:
+      mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
+
+      metabase:
+        path: {{ .MetabasePath }} # path to the metabase
+
+      blobstor:
+        path: {{ .BlobstorPath }} # path to the blobstor
+{{end}}`
+
+const (
+	neofsMainnetAddress   = "2cafa46838e8b564468ebd868dcafdd99dce6221"
+	balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
+	neofsTestnetAddress   = "b65d8243ac63983206d17e5221af0653a7266fa1"
+	balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
+)
+
+var n3config = map[string]struct {
+	MorphRPC        []string
+	RPC             []string
+	NeoFSContract   string
+	BalanceContract string
+}{
+	"testnet": {
+		MorphRPC: []string{
+			"rpc01.morph.testnet.fs.neo.org:51331",
+			"rpc02.morph.testnet.fs.neo.org:51331",
+			"rpc03.morph.testnet.fs.neo.org:51331",
+			"rpc04.morph.testnet.fs.neo.org:51331",
+			"rpc05.morph.testnet.fs.neo.org:51331",
+			"rpc06.morph.testnet.fs.neo.org:51331",
+			"rpc07.morph.testnet.fs.neo.org:51331",
+		},
+		RPC: []string{
+			"rpc01.testnet.n3.nspcc.ru:21331",
+			"rpc02.testnet.n3.nspcc.ru:21331",
+			"rpc03.testnet.n3.nspcc.ru:21331",
+			"rpc04.testnet.n3.nspcc.ru:21331",
+			"rpc05.testnet.n3.nspcc.ru:21331",
+			"rpc06.testnet.n3.nspcc.ru:21331",
+			"rpc07.testnet.n3.nspcc.ru:21331",
+		},
+		NeoFSContract:   neofsTestnetAddress,
+		BalanceContract: balanceTestnetAddress,
+	},
+	"mainnet": {
+		MorphRPC: []string{
+			"rpc1.morph.fs.neo.org:40341",
+			"rpc2.morph.fs.neo.org:40341",
+			"rpc3.morph.fs.neo.org:40341",
+			"rpc4.morph.fs.neo.org:40341",
+			"rpc5.morph.fs.neo.org:40341",
+			"rpc6.morph.fs.neo.org:40341",
+			"rpc7.morph.fs.neo.org:40341",
+		},
+		RPC: []string{
+			"rpc1.n3.nspcc.ru:10331",
+			"rpc2.n3.nspcc.ru:10331",
+			"rpc3.n3.nspcc.ru:10331",
+			"rpc4.n3.nspcc.ru:10331",
+			"rpc5.n3.nspcc.ru:10331",
+			"rpc6.n3.nspcc.ru:10331",
+			"rpc7.n3.nspcc.ru:10331",
+		},
+		NeoFSContract:   neofsMainnetAddress,
+		BalanceContract: balanceMainnetAddress,
+	},
+}
cmd/frostfs-adm/internal/modules/storagecfg/root.go (new file, 433 lines)
@ -0,0 +1,433 @@
|
||||||
|
package storagecfg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
|
||||||
|
"github.com/chzyer/readline"
|
||||||
|
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||||
|
"github.com/nspcc-dev/neo-go/cli/input"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
walletFlag = "wallet"
|
||||||
|
accountFlag = "account"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultControlEndpoint = "localhost:8090"
|
||||||
|
defaultDataEndpoint = "localhost"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RootCmd is a root command of config section.
|
||||||
|
var RootCmd = &cobra.Command{
|
||||||
|
Use: "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
|
||||||
|
Short: "Section for storage node configuration commands",
|
||||||
|
Run: storageConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
fs := RootCmd.Flags()
|
||||||
|
|
||||||
|
fs.StringP(walletFlag, "w", "", "Path to wallet")
|
||||||
|
fs.StringP(accountFlag, "a", "", "Wallet account")
|
||||||
|
}
|
||||||
|
|
||||||
|
type config struct {
|
||||||
|
AnnouncedAddress string
|
||||||
|
AuthorizedKeys []string
|
||||||
|
ControlEndpoint string
|
||||||
|
Endpoint string
|
||||||
|
TLSCert string
|
||||||
|
TLSKey string
|
||||||
|
MorphRPC []string
|
||||||
|
Attribute struct {
|
||||||
|
Locode string
|
||||||
|
}
|
||||||
|
Wallet struct {
|
||||||
|
Path string
|
||||||
|
Account string
|
||||||
|
Password string
|
||||||
|
}
|
||||||
|
Relay bool
|
||||||
|
BlobstorPath string
|
||||||
|
MetabasePath string
|
||||||
|
}
|
||||||
|
|
||||||
|
func storageConfig(cmd *cobra.Command, args []string) {
|
||||||
|
outPath := getOutputPath(args)
|
||||||
|
|
||||||
|
historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
|
||||||
|
readline.SetHistoryPath(historyPath)
|
||||||
|
|
||||||
|
var c config
|
||||||
|
|
||||||
|
c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
|
||||||
|
if c.Wallet.Path == "" {
|
||||||
|
c.Wallet.Path = getPath("Path to the storage node wallet: ")
|
||||||
|
}
|
||||||
|
|
||||||
|
w, err := wallet.NewWalletFromFile(c.Wallet.Path)
|
||||||
|
fatalOnErr(err)
|
||||||
|
|
||||||
|
fillWalletAccount(cmd, &c, w)
|
||||||
|
|
||||||
|
accH, err := flags.ParseAddress(c.Wallet.Account)
|
||||||
|
fatalOnErr(err)
|
||||||
|
|
||||||
|
acc := w.GetAccount(accH)
|
||||||
|
if acc == nil {
|
||||||
|
fatalOnErr(errors.New("can't find account in wallet"))
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
|
||||||
|
fatalOnErr(err)
|
||||||
|
|
||||||
|
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
|
||||||
|
fatalOnErr(err)
|
||||||
|
|
||||||
|
c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
|
||||||
|
|
||||||
|
network := readNetwork(cmd)
|
||||||
|
|
||||||
|
c.MorphRPC = n3config[network].MorphRPC
|
||||||
|
|
||||||
|
depositGas(cmd, acc, network)
|
||||||
|
|
||||||
|
c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
|
||||||
|
|
||||||
|
endpoint := getDefaultEndpoint(cmd, &c)
|
||||||
|
c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
|
||||||
|
if c.Endpoint == "" {
|
||||||
|
c.Endpoint = endpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
|
||||||
|
if c.ControlEndpoint == "" {
|
||||||
|
c.ControlEndpoint = defaultControlEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
c.TLSCert = getPath("TLS Certificate (optional): ")
|
||||||
|
if c.TLSCert != "" {
|
||||||
|
c.TLSKey = getPath("TLS Key: ")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
|
||||||
|
if !c.Relay {
|
||||||
|
p := getPath("Path to the storage directory (all available storage will be used): ")
|
||||||
|
c.BlobstorPath = filepath.Join(p, "blob")
|
||||||
|
c.MetabasePath = filepath.Join(p, "meta")
|
||||||
|
}
|
||||||
|
|
||||||
|
out := applyTemplate(c)
|
||||||
|
fatalOnErr(os.WriteFile(outPath, out, 0o644))
|
||||||
|
|
||||||
|
cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
|
||||||
|
var addr, port string
|
||||||
|
for {
|
||||||
|
c.AnnouncedAddress = getString("Publicly announced address: ")
|
||||||
|
validator := netutil.Address{}
|
||||||
|
err := validator.FromString(c.AnnouncedAddress)
|
||||||
|
if err != nil {
|
||||||
|
cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
uriAddr, err := url.Parse(validator.URIAddr())
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("unexpected error: %w", err))
|
||||||
|
}
|
||||||
|
addr = uriAddr.Hostname()
|
||||||
|
port = uriAddr.Port()
|
||||||
|
ip, err := net.ResolveIPAddr("ip", addr)
|
||||||
|
if err != nil {
|
||||||
|
cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ip.IP.IsGlobalUnicast() {
|
||||||
|
cmd.Println("IP must be global unicast.")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cmd.Printf("Resolved IP address: %s\n", ip.String())
|
||||||
|
|
||||||
|
_, err = strconv.ParseUint(port, 10, 16)
|
||||||
|
if err != nil {
|
||||||
|
cmd.Println("Port must be an integer.")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return net.JoinHostPort(defaultDataEndpoint, port)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
|
||||||
|
c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
|
||||||
|
if c.Wallet.Account == "" {
|
||||||
|
addr := address.Uint160ToString(w.GetChangeAddress())
|
||||||
|
c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
|
||||||
|
if c.Wallet.Account == "" {
|
||||||
|
c.Wallet.Account = addr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func readNetwork(cmd *cobra.Command) string {
|
||||||
|
var network string
|
||||||
|
for {
|
||||||
|
network = getString("Choose network [mainnet]/testnet: ")
|
||||||
|
switch network {
|
||||||
|
case "":
|
||||||
|
network = "mainnet"
|
||||||
|
case "testnet", "mainnet":
|
||||||
|
default:
|
||||||
|
cmd.Println(`Network must be either "mainnet" or "testnet"`)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return network
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOutputPath(args []string) string {
|
||||||
|
if len(args) != 0 {
|
||||||
|
return args[0]
|
||||||
|
}
|
||||||
|
outPath := getPath("File to write config at [./config.yml]: ")
|
||||||
|
if outPath == "" {
|
||||||
|
outPath = "./config.yml"
|
+	}
+	return outPath
+}
+
+func getWalletAccount(w *wallet.Wallet, prompt string) string {
+	addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
+	for i := range w.Accounts {
+		addrs[i] = readline.PcItem(w.Accounts[i].Address)
+	}
+
+	readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
+	defer readline.SetAutoComplete(nil)
+
+	s, err := readline.Line(prompt)
+	fatalOnErr(err)
+	return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
+}
+
+func getString(prompt string) string {
+	s, err := readline.Line(prompt)
+	fatalOnErr(err)
+	if s != "" {
+		_ = readline.AddHistory(s)
+	}
+	return s
+}
+
+type filenameCompleter struct{}
+
+func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
+	prefix := string(line[:pos])
+	dir := filepath.Dir(prefix)
+	de, err := os.ReadDir(dir)
+	if err != nil {
+		return nil, 0
+	}
+
+	for i := range de {
+		name := filepath.Join(dir, de[i].Name())
+		if strings.HasPrefix(name, prefix) {
+			tail := []rune(strings.TrimPrefix(name, prefix))
+			if de[i].IsDir() {
+				tail = append(tail, filepath.Separator)
+			}
+			newLine = append(newLine, tail)
+		}
+	}
+	if pos != 0 {
+		return newLine, pos - len([]rune(dir))
+	}
+	return newLine, 0
+}
+
+func getPath(prompt string) string {
+	readline.SetAutoComplete(filenameCompleter{})
+	defer readline.SetAutoComplete(nil)
+
+	p, err := readline.Line(prompt)
+	fatalOnErr(err)
+
+	if p == "" {
+		return p
+	}
+
+	_ = readline.AddHistory(p)
+
+	abs, err := filepath.Abs(p)
+	if err != nil {
+		fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
+	}
+
+	return abs
+}
+
+func getConfirmation(def bool, prompt string) bool {
+	for {
+		s, err := readline.Line(prompt)
+		fatalOnErr(err)
+
+		switch strings.ToLower(s) {
+		case "y", "yes":
+			return true
+		case "n", "no":
+			return false
+		default:
+			if len(s) == 0 {
+				return def
+			}
+		}
+	}
+}
+
+func applyTemplate(c config) []byte {
+	tmpl, err := template.New("config").Parse(configTemplate)
+	fatalOnErr(err)
+
+	b := bytes.NewBuffer(nil)
+	fatalOnErr(tmpl.Execute(b, c))
+
+	return b.Bytes()
+}
+
+func fatalOnErr(err error) {
+	if err != nil {
+		_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		os.Exit(1)
+	}
+}
+
+func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
+	sideClient := initClient(n3config[network].MorphRPC)
+	balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
+
+	sideActor, err := actor.NewSimple(sideClient, acc)
+	if err != nil {
+		fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
+	}
+
+	sideGas := nep17.NewReader(sideActor, balanceHash)
+	accSH := acc.Contract.ScriptHash()
+
+	balance, err := sideGas.BalanceOf(accSH)
+	if err != nil {
+		fatalOnErr(fmt.Errorf("side chain balance: %w", err))
+	}
+
+	ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
+		fixedn.ToString(balance, 12)))
+	if !ok {
+		return
+	}
+
+	amountStr := getString("Enter amount in GAS: ")
+	amount, err := fixedn.FromString(amountStr, 8)
+	if err != nil {
+		fatalOnErr(fmt.Errorf("invalid amount: %w", err))
+	}
+
+	mainClient := initClient(n3config[network].RPC)
+	neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
+
+	mainActor, err := actor.NewSimple(mainClient, acc)
+	if err != nil {
+		fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
+	}
+
+	mainGas := nep17.New(mainActor, gas.Hash)
+
+	txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
+	if err != nil {
+		fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
+	}
+
+	cmd.Print("Waiting for transactions to persist.")
+	tick := time.NewTicker(time.Second / 2)
+	defer tick.Stop()
+
+	timer := time.NewTimer(time.Second * 20)
+	defer timer.Stop()
+
+	at := trigger.Application
+
+loop:
+	for {
+		select {
+		case <-tick.C:
+			_, err := mainClient.GetApplicationLog(txHash, &at)
+			if err == nil {
+				cmd.Print("\n")
+				break loop
+			}
+			cmd.Print(".")
+		case <-timer.C:
+			cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
+			if getConfirmation(false, "Continue configuration? yes/[no]: ") {
+				return
+			}
+			os.Exit(1)
+		}
+	}
+}
+
+func initClient(rpc []string) *rpcclient.Client {
+	var c *rpcclient.Client
+	var err error
+
+	shuffled := slices.Clone(rpc)
+	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
+
+	for _, endpoint := range shuffled {
+		c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
+			DialTimeout:    time.Second * 2,
+			RequestTimeout: time.Second * 5,
+		})
+		if err != nil {
+			continue
+		}
+		if err = c.Init(); err != nil {
+			continue
+		}
+		return c
+	}
+
+	fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
+	panic("unreachable")
+}
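The wizard code above relies on readline's process-global autocompletion hook. Below is a minimal, self-contained sketch of that pattern, assuming the github.com/chzyer/readline package (which matches the calls used here); the candidate words are invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/chzyer/readline"
)

func main() {
	// Register a prefix completer for the duration of one prompt,
	// then unregister it, mirroring getWalletAccount above.
	readline.SetAutoComplete(readline.NewPrefixCompleter(
		readline.PcItem("mainnet"),
		readline.PcItem("testnet"),
	))
	defer readline.SetAutoComplete(nil)

	s, err := readline.Line("network> ")
	if err != nil {
		panic(err)
	}
	// The completer can leave a trailing space, hence the trim.
	fmt.Println("selected:", strings.TrimSpace(s))
}
```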
@@ -9,6 +9,7 @@ import (
 	"io"
 	"os"
 	"slices"
+	"strings"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@@ -76,7 +77,9 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
 // SortedIDList returns sorted list of identifiers of user's containers.
 func (x ListContainersRes) SortedIDList() []cid.ID {
 	list := x.cliRes.Containers()
-	slices.SortFunc(list, cid.ID.Cmp)
+	slices.SortFunc(list, func(lhs, rhs cid.ID) int {
+		return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
+	})
 	return list
 }
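Both sides of this hunk yield the same ordering: sorting container IDs by their string encodings. A small self-contained sketch (with a hypothetical ID type standing in for cid.ID) showing that the method-value and closure forms of slices.SortFunc are interchangeable here:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// ID is a stand-in for the SDK's cid.ID.
type ID struct{ s string }

func (a ID) EncodeToString() string { return a.s }
func (a ID) Cmp(b ID) int           { return strings.Compare(a.s, b.s) }

func main() {
	list := []ID{{"c"}, {"a"}, {"b"}}

	// Method-value form, as on the removed side of the hunk.
	slices.SortFunc(list, ID.Cmp)

	// Closure form, as on the added side; same ordering.
	slices.SortFunc(list, func(lhs, rhs ID) int {
		return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
	})

	fmt.Println(list) // [{a} {b} {c}]
}
```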
@@ -684,7 +687,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
 		return nil, fmt.Errorf("read object list: %w", err)
 	}

-	slices.SortFunc(list, oid.ID.Cmp)
+	slices.SortFunc(list, func(a, b oid.ID) int {
+		return strings.Compare(a.EncodeToString(), b.EncodeToString())
+	})

 	return &SearchObjectsRes{
 		ids: list,
@@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
 	prmDial := client.PrmDial{
 		Endpoint: addr.URIAddr(),
 		GRPCDialOptions: []grpc.DialOption{
-			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
+			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
 			grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
 			grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
 		},
@@ -28,7 +28,7 @@ const (
 	RPC          = "rpc-endpoint"
 	RPCShorthand = "r"
 	RPCDefault   = ""
-	RPCUsage     = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"
+	RPCUsage     = "Remote node address (as 'multiaddr' or '<host>:<port>')"

 	Timeout          = "timeout"
 	TimeoutShorthand = "t"
@@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
 	_, _ = tw.Write([]byte("#\tName\tType\n"))
 	for i, t := range targets {
-		_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
+		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
 	}
 	_ = tw.Flush()
 	cmd.Print(buf.String())
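The two lines in this hunk print the same bytes; the difference is allocation. fmt.Appendf (Go 1.19+) formats straight into a byte slice, while []byte(fmt.Sprintf(...)) formats into a string first and then copies it. A runnable sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"text/tabwriter"
)

func main() {
	buf := bytes.NewBuffer(nil)
	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

	// One formatting step, no intermediate string.
	_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\n", "0", "target-a"))

	// Same output, one extra allocation for the string.
	_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\n", "1", "target-b")))

	_ = tw.Flush()
	fmt.Print(buf.String())
}
```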
@@ -1,117 +0,0 @@
-package control
-
-import (
-	"bytes"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
-	object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/mr-tron/base58"
-	"github.com/spf13/cobra"
-)
-
-const (
-	FullInfoFlag      = "full"
-	FullInfoFlagUsage = "Print full ShardInfo."
-)
-
-var locateObjectCmd = &cobra.Command{
-	Use:   "locate-object",
-	Short: "List shards storing the object",
-	Long:  "List shards storing the object",
-	Run:   locateObject,
-}
-
-func initControlLocateObjectCmd() {
-	initControlFlags(locateObjectCmd)
-
-	flags := locateObjectCmd.Flags()
-
-	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
-	_ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
-
-	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
-	_ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
-
-	flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
-	flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
-}
-
-func locateObject(cmd *cobra.Command, _ []string) {
-	var cnr cid.ID
-	var obj oid.ID
-
-	_ = object.ReadObjectAddress(cmd, &cnr, &obj)
-
-	pk := key.Get(cmd)
-
-	body := new(control.ListShardsForObjectRequest_Body)
-	body.SetContainerId(cnr.EncodeToString())
-	body.SetObjectId(obj.EncodeToString())
-	req := new(control.ListShardsForObjectRequest)
-	req.SetBody(body)
-	signRequest(cmd, pk, req)
-
-	cli := getClient(cmd, pk)
-
-	var err error
-	var resp *control.ListShardsForObjectResponse
-	err = cli.ExecRaw(func(client *rawclient.Client) error {
-		resp, err = control.ListShardsForObject(client, req)
-		return err
-	})
-	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
-	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
-	shardIDs := resp.GetBody().GetShard_ID()
-
-	isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
-	if !isFull {
-		for _, id := range shardIDs {
-			cmd.Println(base58.Encode(id))
-		}
-		return
-	}
-
-	// get full shard info
-	listShardsReq := new(control.ListShardsRequest)
-	listShardsReq.SetBody(new(control.ListShardsRequest_Body))
-	signRequest(cmd, pk, listShardsReq)
-	var listShardsResp *control.ListShardsResponse
-	err = cli.ExecRaw(func(client *rawclient.Client) error {
-		listShardsResp, err = control.ListShards(client, listShardsReq)
-		return err
-	})
-	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
-	verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
-
-	shards := listShardsResp.GetBody().GetShards()
-	sortShardsByID(shards)
-	shards = filterShards(shards, shardIDs)
-
-	isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
-	if isJSON {
-		prettyPrintShardsJSON(cmd, shards)
-	} else {
-		prettyPrintShards(cmd, shards)
-	}
-}
-
-func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
-	var res []control.ShardInfo
-	for _, id := range ids {
-		for _, inf := range info {
-			if bytes.Equal(inf.Shard_ID, id) {
-				res = append(res, inf)
-			}
-		}
-	}
-	return res
-}
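The filterShards helper in the removed file is a plain nested scan keyed on raw byte IDs. A simplified, runnable sketch of the same idea (the ShardInfo type here is a stand-in for the control API message):

```go
package main

import (
	"bytes"
	"fmt"
)

type ShardInfo struct{ ID []byte }

// filterShards keeps only the shards whose ID appears in ids,
// preserving the order of ids, as in the removed command.
func filterShards(info []ShardInfo, ids [][]byte) []ShardInfo {
	var res []ShardInfo
	for _, id := range ids {
		for _, inf := range info {
			if bytes.Equal(inf.ID, id) {
				res = append(res, inf)
			}
		}
	}
	return res
}

func main() {
	shards := []ShardInfo{{ID: []byte{1}}, {ID: []byte{2}}}
	fmt.Println(filterShards(shards, [][]byte{{2}})) // [{[2]}]
}
```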
@@ -39,7 +39,6 @@ func init() {
 		listRulesCmd,
 		getRuleCmd,
 		listTargetsCmd,
-		locateObjectCmd,
 	)

 	initControlHealthCheckCmd()
@@ -53,5 +52,4 @@ func init() {
 	initControlListRulesCmd()
 	initControGetRuleCmd()
 	initControlListTargetsCmd()
-	initControlLocateObjectCmd()
 }
@@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
 var sealWritecacheShardCmd = &cobra.Command{
 	Use:   "seal",
 	Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
-	Long:  "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
+	Long:  "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
 	Run:   sealWritecache,
 }
@@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
 			commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
 		}

-		objAddr = ReadObjectAddress(cmd, &cnr, &obj)
+		objAddr = readObjectAddress(cmd, &cnr, &obj)
 	}

 	pk := key.GetOrGenerate(cmd)
@@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)

 	filename := cmd.Flag(fileFlag).Value.String()
 	out, closer := createOutWriter(cmd, filename)
@@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)

 	ranges, err := getRangeList(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)
@@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)
 	pk := key.GetOrGenerate(cmd)

 	cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -101,7 +101,7 @@ func initObjectNodesCmd() {
 func objectNodes(cmd *cobra.Command, _ []string) {
 	var cnrID cid.ID
 	var objID oid.ID
-	ReadObjectAddress(cmd, &cnrID, &objID)
+	readObjectAddress(cmd, &cnrID, &objID)

 	pk := key.GetOrGenerate(cmd)
 	cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -56,7 +56,7 @@ func patch(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)

 	ranges, err := getRangeSlice(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)
@@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID

-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)

 	ranges, err := getRangeList(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)
@@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
 	return xs
 }

-func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
+func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
 	readCID(cmd, cnr)
 	readOID(cmd, obj)
@@ -33,13 +33,12 @@ func _client() (tree.TreeServiceClient, error) {

 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
-			tracing.NewUnaryClientInterceptor(),
+			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
 			tracing.NewStreamClientInterceptor(),
 		),
 		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
-		grpc.WithDisableServiceConfig(),
 	}

 	if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
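For readers unfamiliar with the dial options touched in this hunk: chained client interceptors wrap every RPC, and WaitForReady makes calls block until the connection is usable. A minimal sketch with a hypothetical logging interceptor, assuming a recent google.golang.org/grpc:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// logging is a hypothetical unary client interceptor: it runs
// around every RPC issued through the connection.
func logging() grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
	) error {
		log.Println("call:", method)
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

func main() {
	conn, err := grpc.NewClient("localhost:8080",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(logging()),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```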
@@ -9,7 +9,6 @@ import (
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/spf13/viper"
 	"go.uber.org/zap"
 )
@@ -39,14 +38,13 @@ func reloadConfig() error {
 	}
 	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
 	audit.Store(cfg.GetBool("audit.enabled"))
-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(cfg.GetString("logger.level"))
 	if err != nil {
 		return err
 	}
-	log.Reload(logPrm)
+	logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")

-	return nil
+	return logPrm.Reload()
 }

 func watchForSignal(ctx context.Context, cancel func()) {
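logPrm.Reload() here is FrostFS's own logger API, but the underlying idea is standard for zap (the logging backend used in this codebase): keep an atomic level that a running logger consults on every write, and flip it when config is re-read. A sketch of that primitive using plain go.uber.org/zap:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	lvl := zap.NewAtomicLevelAt(zapcore.InfoLevel)
	cfg := zap.NewProductionConfig()
	cfg.Level = lvl
	log, _ := cfg.Build()
	defer func() { _ = log.Sync() }()

	log.Debug("dropped: below the current level")

	// "Reload": change the level in place after re-reading config;
	// the existing logger picks it up immediately.
	lvl.SetLevel(zapcore.DebugLevel)
	log.Debug("now visible")
}
```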
@@ -31,6 +31,7 @@ const (
 var (
 	wg        = new(sync.WaitGroup)
 	intErr    = make(chan error) // internal inner ring errors
+	logPrm    = new(logger.Prm)
 	innerRing *innerring.Server
 	pprofCmp  *pprofComponent
 	metricsCmp *httpComponent
@@ -69,7 +70,6 @@ func main() {

 	metrics := irMetrics.NewInnerRingMetrics()

-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(
 		cfg.GetString("logger.level"),
 	)
@@ -2,17 +2,13 @@ package meta

 import (
 	"context"
-	"encoding/binary"
-	"errors"
 	"fmt"

 	common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
-	schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
 	"github.com/rivo/tview"
 	"github.com/spf13/cobra"
-	"go.etcd.io/bbolt"
 )

 var tuiCMD = &cobra.Command{
@@ -31,11 +27,6 @@ Available search filters:

 var initialPrompt string

-var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
-	2: schema.MetabaseParserV2,
-	3: schema.MetabaseParserV3,
-}
-
 func init() {
 	common.AddComponentPathFlag(tuiCMD, &vPath)

@@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
 	}
 	defer db.Close()

-	schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
-	if !hasVersion {
-		return errors.New("couldn't detect schema version")
-	}
-
-	metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
-	if !ok {
-		return fmt.Errorf("unknown schema version %d", schemaVersion)
-	}
-
 	// Need if app was stopped with Ctrl-C.
 	ctx, cancel := context.WithCancel(cmd.Context())
 	defer cancel()

 	app := tview.NewApplication()
-	ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
+	ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)

 	_ = ui.AddFilter("cid", tui.CIDParser, "CID")
 	_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error {
 	app.SetRoot(ui, true).SetFocus(ui)
 	return app.Run()
 }
-
-var (
-	shardInfoBucket = []byte{5}
-	versionRecord   = []byte("version")
-)
-
-func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
-	err := db.View(func(tx *bbolt.Tx) error {
-		bkt := tx.Bucket(shardInfoBucket)
-		if bkt == nil {
-			return nil
-		}
-		rec := bkt.Get(versionRecord)
-		if rec == nil {
-			return nil
-		}
-
-		version = binary.LittleEndian.Uint64(rec)
-		ok = true
-
-		return nil
-	})
-	if err != nil {
-		common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
-	}
-
-	return
-}
@@ -80,15 +80,10 @@ var (
 		},
 	)

-	UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
+	UserAttributeParser = NewUserAttributeKeyBucketParser(
 		NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
 	)

-	UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
-		NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
-		[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
-	)
-
 	PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
 		cidResolver: StrictResolver,
 		oidResolver: StrictResolver,
@@ -113,14 +108,4 @@ var (
 		cidResolver: StrictResolver,
 		oidResolver: LenientResolver,
 	})
-
-	ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
-		cidResolver: LenientResolver,
-		oidResolver: LenientResolver,
-	})
-
-	ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
-		cidResolver: StrictResolver,
-		oidResolver: LenientResolver,
-	})
 )
@@ -22,31 +22,27 @@ const (
 	Split
 	ContainerCounters
 	ECInfo
-	ExpirationEpochToObject
-	ObjectToExpirationEpoch
 )

 var x = map[Prefix]string{
 	Graveyard:         "Graveyard",
 	Garbage:           "Garbage",
 	ToMoveIt:          "To Move It",
 	ContainerVolume:   "Container Volume",
 	Locked:            "Locked",
 	ShardInfo:         "Shard Info",
 	Primary:           "Primary",
 	Lockers:           "Lockers",
 	Tombstone:         "Tombstone",
 	Small:             "Small",
 	Root:              "Root",
 	Owner:             "Owner",
 	UserAttribute:     "User Attribute",
 	PayloadHash:       "Payload Hash",
 	Parent:            "Parent",
 	Split:             "Split",
 	ContainerCounters: "Container Counters",
 	ECInfo:            "EC Info",
-	ExpirationEpochToObject: "Exp. Epoch to Object",
-	ObjectToExpirationEpoch: "Object to Exp. Epoch",
 }

 func (p Prefix) String() string {
@@ -9,7 +9,7 @@ import (

 func (b *PrefixBucket) String() string {
 	return common.FormatSimple(
-		fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+		fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 	)
 }

@@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
 	return fmt.Sprintf(
 		"%s CID %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(b.id.String(), tcell.ColorAqua),
 	)
@@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
 func (b *UserAttributeKeyBucket) String() string {
 	return fmt.Sprintf("%s CID %s ATTR-KEY %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(
 			fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
@@ -2,7 +2,6 @@ package buckets

 import (
 	"errors"
-	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -58,11 +57,10 @@ var (
 )

 var (
 	ErrNotBucket          = errors.New("not a bucket")
 	ErrInvalidKeyLength   = errors.New("invalid key length")
 	ErrInvalidValueLength = errors.New("invalid value length")
 	ErrInvalidPrefix      = errors.New("invalid prefix")
-	ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
 )

 func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
 }

 func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
-	return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
-}
-
-func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
 	return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
 		if value != nil {
 			return nil, nil, ErrNotBucket
@@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
 			return nil, nil, err
 		}
 		b.key = string(key[33:])

-		if len(keys) != 0 && !slices.Contains(keys, b.key) {
-			return nil, nil, ErrUnexpectedAttributeKey
-		}
-
 		return &b, next, nil
 	}
 }
@@ -5,30 +5,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
 )

-var MetabaseParserV3 = common.WithFallback(
-	common.Any(
-		buckets.GraveyardParser,
-		buckets.GarbageParser,
-		buckets.ContainerVolumeParser,
-		buckets.LockedParser,
-		buckets.ShardInfoParser,
-		buckets.PrimaryParser,
-		buckets.LockersParser,
-		buckets.TombstoneParser,
-		buckets.SmallParser,
-		buckets.RootParser,
-		buckets.UserAttributeParserV3,
-		buckets.ParentParser,
-		buckets.SplitParser,
-		buckets.ContainerCountersParser,
-		buckets.ECInfoParser,
-		buckets.ExpirationEpochToObjectParser,
-		buckets.ObjectToExpirationEpochParser,
-	),
-	common.RawParser.ToFallbackParser(),
-)
-
-var MetabaseParserV2 = common.WithFallback(
+var MetabaseParser = common.WithFallback(
 	common.Any(
 		buckets.GraveyardParser,
 		buckets.GarbageParser,
@@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
 		buckets.SmallParser,
 		buckets.RootParser,
 		buckets.OwnerParser,
-		buckets.UserAttributeParserV2,
+		buckets.UserAttributeParser,
 		buckets.PayloadHashParser,
 		buckets.ParentParser,
 		buckets.SplitParser,
@@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
 func (r *ECInfoRecord) DetailedString() string {
 	return spew.Sdump(*r)
 }
-
-func (r *ExpirationEpochToObjectRecord) DetailedString() string {
-	return spew.Sdump(*r)
-}
-
-func (r *ObjectToExpirationEpochRecord) DetailedString() string {
-	return spew.Sdump(*r)
-}
@@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
 		return common.No
 	}
 }
-
-func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
-	switch typ {
-	case "cid":
-		id := val.(cid.ID)
-		return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
-	case "oid":
-		id := val.(oid.ID)
-		return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
-	default:
-		return common.No
-	}
-}
-
-func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
-	switch typ {
-	case "oid":
-		id := val.(oid.ID)
-		return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
-	default:
-		return common.No
-	}
-}
@@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
 	}
 	return &r, nil, nil
 }
-
-func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
-	if len(key) != 72 {
-		return nil, nil, ErrInvalidKeyLength
-	}
-
-	var (
-		r   ExpirationEpochToObjectRecord
-		err error
-	)
-
-	r.epoch = binary.BigEndian.Uint64(key[:8])
-	if err = r.cnt.Decode(key[8:40]); err != nil {
-		return nil, nil, err
-	}
-	if err = r.obj.Decode(key[40:]); err != nil {
-		return nil, nil, err
-	}
-
-	return &r, nil, nil
-}
-
-func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
-	if len(key) != 32 {
-		return nil, nil, ErrInvalidKeyLength
-	}
-	if len(value) != 8 {
-		return nil, nil, ErrInvalidValueLength
-	}
-
-	var (
-		r   ObjectToExpirationEpochRecord
-		err error
-	)
-
-	if err = r.obj.Decode(key); err != nil {
-		return nil, nil, err
-	}
-	r.epoch = binary.LittleEndian.Uint64(value)
-
-	return &r, nil, nil
-}
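A note on the removed key layouts: the 72-byte ExpirationEpochToObject key puts a big-endian epoch first because big-endian integers sort bytewise in numeric order, so a bolt cursor walks these records oldest-epoch-first. A self-contained sketch of that property:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// key mirrors the removed layout: 8-byte big-endian epoch,
// then a 32-byte container ID and a 32-byte object ID.
func key(epoch uint64, cnt, obj [32]byte) []byte {
	k := make([]byte, 0, 72)
	k = binary.BigEndian.AppendUint64(k, epoch)
	k = append(k, cnt[:]...)
	k = append(k, obj[:]...)
	return k
}

func main() {
	var cnt, obj [32]byte
	a, b := key(5, cnt, obj), key(10, cnt, obj)
	fmt.Println(bytes.Compare(a, b) < 0) // true: epoch 5 sorts before epoch 10
}
```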
@@ -2,7 +2,6 @@ package records

 import (
 	"fmt"
-	"strconv"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	"github.com/gdamore/tcell/v2"
@@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
 		len(r.ids),
 	)
 }
-
-func (r *ExpirationEpochToObjectRecord) String() string {
-	return fmt.Sprintf(
-		"exp. epoch %s %c CID %s OID %s",
-		common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
-		tview.Borders.Vertical,
-		common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
-		common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
-	)
-}
-
-func (r *ObjectToExpirationEpochRecord) String() string {
-	return fmt.Sprintf(
-		"OID %s %c exp. epoch %s",
-		common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
-		tview.Borders.Vertical,
-		common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
-	)
-}
@@ -79,15 +79,4 @@ type (
 		id  oid.ID
 		ids []oid.ID
 	}
-
-	ExpirationEpochToObjectRecord struct {
-		epoch uint64
-		cnt   cid.ID
-		obj   oid.ID
-	}
-
-	ObjectToExpirationEpochRecord struct {
-		obj   oid.ID
-		epoch uint64
-	}
 )
@@ -1,8 +1,6 @@
 package tui

 import (
-	"slices"
-
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
@@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {

 	// Used history data for search prompt, so just make that data recent.
 	if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
-		f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
+		f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
 		f.history = append(f.history, s)
 	}
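The two lines in this hunk remove the same history entry: slices.Delete (Go 1.21+) is the library form of the classic append-copy idiom; both shift the tail left in place. A quick sketch:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	h1 := []string{"a", "b", "c"}
	h1 = slices.Delete(h1, 1, 2) // drop the element at index 1

	h2 := []string{"a", "b", "c"}
	h2 = append(h2[:1], h2[2:]...) // same result, hand-rolled

	fmt.Println(h1, h2) // [a c] [a c]
}
```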
@@ -33,7 +33,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
 	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@@ -70,7 +69,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
 	netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -108,7 +106,6 @@ type applicationConfiguration struct {
 		level       string
 		destination string
 		timestamp   bool
-		options     []zap.Option
 	}

 	ObjectCfg struct {
@@ -118,6 +115,7 @@ type applicationConfiguration struct {

 	EngineCfg struct {
 		errorThreshold uint32
+		shardPoolSize  uint32
 		shards         []shardCfg
 		lowMem         bool
 	}
@@ -136,7 +134,6 @@ type shardCfg struct {
 	refillMetabase             bool
 	refillMetabaseWorkersCount int
 	mode                       shardmode.Mode
-	limiter                    qos.Limiter

 	metaCfg struct {
 		path string
@@ -233,14 +230,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	a.LoggerCfg.level = loggerconfig.Level(c)
 	a.LoggerCfg.destination = loggerconfig.Destination(c)
 	a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
-	var opts []zap.Option
-	if loggerconfig.ToLokiConfig(c).Enabled {
-		opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
-			lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
-			return lokiCore
-		})}
-	}
-	a.LoggerCfg.options = opts

 	// Object
@@ -258,47 +247,45 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	// Storage Engine

 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
+	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
 	a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)

 	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
 }

-func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
-	var target shardCfg
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
+	var newConfig shardCfg

-	target.refillMetabase = source.RefillMetabase()
-	target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
-	target.mode = source.Mode()
-	target.compress = source.Compress()
-	target.estimateCompressibility = source.EstimateCompressibility()
-	target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold()
-	target.uncompressableContentType = source.UncompressableContentTypes()
-	target.smallSizeObjectLimit = source.SmallSizeLimit()
+	newConfig.refillMetabase = oldConfig.RefillMetabase()
+	newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
+	newConfig.mode = oldConfig.Mode()
+	newConfig.compress = oldConfig.Compress()
+	newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
+	newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
+	newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
+	newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()

-	a.setShardWriteCacheConfig(&target, source)
+	a.setShardWriteCacheConfig(&newConfig, oldConfig)

-	a.setShardPiloramaConfig(c, &target, source)
+	a.setShardPiloramaConfig(c, &newConfig, oldConfig)

-	if err := a.setShardStorageConfig(&target, source); err != nil {
+	if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
 		return err
 	}

-	a.setMetabaseConfig(&target, source)
+	a.setMetabaseConfig(&newConfig, oldConfig)

-	a.setGCConfig(&target, source)
-	if err := a.setLimiter(&target, source); err != nil {
-		return err
-	}
+	a.setGCConfig(&newConfig, oldConfig)

-	a.EngineCfg.shards = append(a.EngineCfg.shards, target)
+	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)

 	return nil
 }

-func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
-	writeCacheCfg := source.WriteCache()
+func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	writeCacheCfg := oldConfig.WriteCache()
 	if writeCacheCfg.Enabled() {
-		wc := &target.writecacheCfg
+		wc := &newConfig.writecacheCfg

 		wc.enabled = true
 		wc.path = writeCacheCfg.Path()
@@ -311,10 +298,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
 	}
 }

-func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
 	if config.BoolSafe(c.Sub("tree"), "enabled") {
-		piloramaCfg := source.Pilorama()
-		pr := &target.piloramaCfg
+		piloramaCfg := oldConfig.Pilorama()
+		pr := &newConfig.piloramaCfg

 		pr.enabled = true
 		pr.path = piloramaCfg.Path()
@@ -325,8 +312,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
 	}
 }

-func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
-	blobStorCfg := source.BlobStor()
+func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+	blobStorCfg := oldConfig.BlobStor()
 	storagesCfg := blobStorCfg.Storages()

 	ss := make([]subStorageCfg, 0, len(storagesCfg))
@@ -360,13 +347,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
 		ss = append(ss, sCfg)
 	}

-	target.subStorages = ss
+	newConfig.subStorages = ss
 	return nil
 }

-func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
-	metabaseCfg := source.Metabase()
-	m := &target.metaCfg
+func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	metabaseCfg := oldConfig.Metabase()
+	m := &newConfig.metaCfg

 	m.path = metabaseCfg.Path()
 	m.perm = metabaseCfg.BoltDB().Perm()
@@ -374,25 +361,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
 	m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
 }

-func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
-	gcCfg := source.GC()
-	target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
-	target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
-	target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-	target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
-}
-
-func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
-	limitsConfig := source.Limits()
-	limiter, err := qos.NewLimiter(limitsConfig)
-	if err != nil {
-		return err
-	}
-	if target.limiter != nil {
-		target.limiter.Close()
-	}
-	target.limiter = limiter
-	return nil
-}
+func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	gcCfg := oldConfig.GC()
+	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
+}

 // internals contains application-specific internals that are created
@@ -482,6 +456,7 @@ type shared struct {
 // dynamicConfiguration stores parameters of the
 // components that supports runtime reconfigurations.
 type dynamicConfiguration struct {
+	logger  *logger.Prm
 	pprof   *httpComponent
 	metrics *httpComponent
 }
@@ -553,8 +528,6 @@ type cfgGRPC struct {
 	maxChunkSize     uint64
 	maxAddrAmount    uint64
 	reconnectTimeout time.Duration
-
-	limiter atomic.Pointer[limiting.SemaphoreLimiter]
 }

 func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@@ -691,6 +664,10 @@ type cfgAccessPolicyEngine struct {
 }

 type cfgObjectRoutines struct {
+	putRemote *ants.Pool
+
+	putLocal *ants.Pool
+
 	replication *ants.Pool
 }
|
||||||
|
|
||||||
netState.metrics = c.metricsCollector
|
netState.metrics = c.metricsCollector
|
||||||
|
|
||||||
logPrm, err := c.loggerPrm()
|
logPrm := c.loggerPrm()
|
||||||
fatalOnErr(err)
|
|
||||||
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
|
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
|
||||||
log, err := logger.NewLogger(logPrm)
|
log, err := logger.NewLogger(logPrm)
|
||||||
fatalOnErr(err)
|
fatalOnErr(err)
|
||||||
|
if loggerconfig.ToLokiConfig(appCfg).Enabled {
|
||||||
|
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
|
||||||
|
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
|
||||||
|
return lokiCore
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
c.internals = initInternals(appCfg, log)
|
c.internals = initInternals(appCfg, log)
|
||||||
|
|
||||||
|
@ -870,14 +852,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func initCfgGRPC() (cfg cfgGRPC) {
|
func initCfgGRPC() cfgGRPC {
|
||||||
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
|
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
|
||||||
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
|
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
|
||||||
|
|
||||||
cfg.maxChunkSize = maxChunkSize
|
return cfgGRPC{
|
||||||
cfg.maxAddrAmount = maxAddrAmount
|
maxChunkSize: maxChunkSize,
|
||||||
|
maxAddrAmount: maxAddrAmount,
|
||||||
return
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func initCfgObject(appCfg *config.Config) cfgObject {
|
func initCfgObject(appCfg *config.Config) cfgObject {
|
||||||
|
@@ -894,6 +876,7 @@ func (c *cfg) engineOpts() []engine.Option {
 	var opts []engine.Option

 	opts = append(opts,
+		engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
 		engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
 		engine.WithLogger(c.log),
 		engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
@@ -933,7 +916,6 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
 			writecache.WithMaxCacheCount(wcRead.countLimit),
 			writecache.WithNoSync(wcRead.noSync),
 			writecache.WithLogger(c.log),
-			writecache.WithQoSLimiter(shCfg.limiter),
 		)
 	}
 	return writeCacheOpts
@@ -1049,7 +1031,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 	}
 	if c.metricsCollector != nil {
 		mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
-		shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
 	}

 	var sh shardOptsWithID
@@ -1074,28 +1055,30 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID

 			return pool
 		}),
-		shard.WithLimiter(shCfg.limiter),
 	}
 	return sh
 }

-func (c *cfg) loggerPrm() (logger.Prm, error) {
-	var prm logger.Prm
-	// (re)init read configuration
-	err := prm.SetLevelString(c.LoggerCfg.level)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
-	}
-	err = prm.SetDestination(c.LoggerCfg.destination)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
-	}
-	prm.PrependTimestamp = c.LoggerCfg.timestamp
-	prm.Options = c.LoggerCfg.options
-
-	return prm, nil
+func (c *cfg) loggerPrm() *logger.Prm {
+	// check if it has been inited before
+	if c.dynamicConfiguration.logger == nil {
+		c.dynamicConfiguration.logger = new(logger.Prm)
+	}
+
+	// (re)init read configuration
+	err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic("incorrect log level format: " + c.LoggerCfg.level)
+	}
+	err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic("incorrect log destination format: " + c.LoggerCfg.destination)
+	}
+	c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
+
+	return c.dynamicConfiguration.logger
 }

 func (c *cfg) LocalAddress() network.AddressGroup {
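The rewritten loggerPrm switches from building a fresh value per call to a lazily allocated, shared object that later calls mutate in place, so everything holding the pointer sees updates. A stripped-down sketch of that pattern (types are simplified stand-ins):

```go
package main

import "fmt"

// Prm is a stand-in for logger.Prm.
type Prm struct{ Level string }

type cfg struct{ logger *Prm }

func (c *cfg) loggerPrm(level string) *Prm {
	if c.logger == nil { // first call allocates
		c.logger = new(Prm)
	}
	c.logger.Level = level // later calls update the shared object
	return c.logger
}

func main() {
	c := &cfg{}
	p := c.loggerPrm("info")
	_ = c.loggerPrm("debug")
	fmt.Println(p.Level) // "debug": the earlier pointer sees the update
}
```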
@@ -1183,7 +1166,21 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
 func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
 	var err error

+	optNonBlocking := ants.WithNonblocking(true)
+
+	putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
+	pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
+	fatalOnErr(err)
+
+	putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
+	pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
+	fatalOnErr(err)
+
 	replicatorPoolSize := replicatorconfig.PoolSize(cfg)
+	if replicatorPoolSize <= 0 {
+		replicatorPoolSize = putRemoteCapacity
+	}
+
 	pool.replication, err = ants.NewPool(replicatorPoolSize)
 	fatalOnErr(err)

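
The initObjectPool additions size two object-put pools from config and build them with ants.WithNonblocking(true), so a saturated pool rejects work instead of queueing it. A sketch of that behavior, assuming the ants used here is github.com/panjf2000/ants/v2, whose API matches the calls in the hunk:

    package main

    import (
        "errors"
        "fmt"

        "github.com/panjf2000/ants/v2"
    )

    func main() {
        // Non-blocking pool: Submit returns ants.ErrPoolOverload when all
        // workers are busy instead of blocking the caller.
        pool, err := ants.NewPool(10, ants.WithNonblocking(true))
        if err != nil {
            panic(err)
        }
        defer pool.Release()

        err = pool.Submit(func() { fmt.Println("replicating object") })
        if errors.Is(err, ants.ErrPoolOverload) {
            fmt.Println("pool is full, request rejected")
        }
    }
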
@@ -1335,7 +1332,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	// all the components are expected to support
 	// Logger's dynamic reconfiguration approach

-	components := c.getComponents(ctx)
+	// Logger
+
+	logPrm := c.loggerPrm()
+
+	components := c.getComponents(ctx, logPrm)

 	// Object
 	c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@@ -1373,17 +1374,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }

-func (c *cfg) getComponents(ctx context.Context) []dCmp {
+func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 	var components []dCmp

-	components = append(components, dCmp{"logger", func() error {
-		prm, err := c.loggerPrm()
-		if err != nil {
-			return err
-		}
-		c.log.Reload(prm)
-		return nil
-	}})
+	components = append(components, dCmp{"logger", logPrm.Reload})
 	components = append(components, dCmp{"runtime", func() error {
 		setRuntimeParameters(ctx, c)
 		return nil
@@ -1416,13 +1410,17 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
 		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
 	}

-	components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
-
 	return components
 }

 func (c *cfg) reloadPools() error {
-	newSize := replicatorconfig.PoolSize(c.appCfg)
+	newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
+	c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
+
+	newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
+	c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
+
+	newSize = replicatorconfig.PoolSize(c.appCfg)
 	c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")

 	return nil

@@ -12,10 +12,13 @@ import (
 func TestConfigDir(t *testing.T) {
 	dir := t.TempDir()

-	cfgFileName := path.Join(dir, "cfg_01.yml")
+	cfgFileName0 := path.Join(dir, "cfg_00.json")
+	cfgFileName1 := path.Join(dir, "cfg_01.yml")

-	require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
+	require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
+	require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))

 	c := New("", dir, "")
 	require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
+	require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
 }
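
The reworked TestConfigDir drops a JSON and a YAML file into one directory and expects keys from both to be visible, so the loader evidently merges every file it finds there; the zero-padded names suggest files apply in lexical order, with later files able to override earlier keys. A sketch of that assumed ordering (the lexical-order detail is inferred from the test, not stated by this diff):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // orderedConfigFiles lists regular files in dir; os.ReadDir already returns
    // entries sorted by filename, so cfg_00.json precedes cfg_01.yml and later
    // files can override earlier keys when merged.
    func orderedConfigFiles(dir string) ([]string, error) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return nil, err
        }
        var files []string
        for _, e := range entries {
            if !e.IsDir() {
                files = append(files, filepath.Join(dir, e.Name()))
            }
        }
        return files, nil
    }

    func main() {
        files, err := orderedConfigFiles(".")
        if err != nil {
            panic(err)
        }
        fmt.Println(files)
    }
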
@@ -11,6 +11,10 @@ import (

 const (
 	subsection = "storage"
+
+	// ShardPoolSizeDefault is a default value of routine pool size per-shard to
+	// process object PUT operations in a storage engine.
+	ShardPoolSizeDefault = 20
 )

 // ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
 	return nil
 }

+// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
+//
+// Returns ShardPoolSizeDefault if the value is not a positive number.
+func ShardPoolSize(c *config.Config) uint32 {
+	v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
+	if v > 0 {
+		return v
+	}
+
+	return ShardPoolSizeDefault
+}
+
 // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
 //
 // Returns 0 if the the value is missing.

@@ -11,7 +11,6 @@ import (
 	blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
 	fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
-	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
 	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@@ -54,6 +53,7 @@ func TestEngineSection(t *testing.T) {
 		require.False(t, handlerCalled)

 		require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
+		require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
 		require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
 	})

@@ -63,6 +63,7 @@ func TestEngineSection(t *testing.T) {
 		num := 0

 		require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
+		require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))

 		err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
 			defer func() {
@@ -75,7 +76,6 @@ func TestEngineSection(t *testing.T) {
 			ss := blob.Storages()
 			pl := sc.Pilorama()
 			gc := sc.GC()
-			limits := sc.Limits()

 			switch num {
 			case 0:
@@ -134,75 +134,6 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, false, sc.RefillMetabase())
 				require.Equal(t, mode.ReadOnly, sc.Mode())
 				require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
-
-				readLimits := limits.Read()
-				writeLimits := limits.Write()
-				require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
-				require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
-				require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
-				require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
-				require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
-				require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
-				require.ElementsMatch(t, readLimits.Tags,
-					[]limitsconfig.IOTagConfig{
-						{
-							Tag:         "internal",
-							Weight:      toPtr(20),
-							ReservedOps: toPtr(1000),
-							LimitOps:    toPtr(0),
-						},
-						{
-							Tag:         "client",
-							Weight:      toPtr(70),
-							ReservedOps: toPtr(10000),
-						},
-						{
-							Tag:         "background",
-							Weight:      toPtr(5),
-							LimitOps:    toPtr(10000),
-							ReservedOps: toPtr(0),
-						},
-						{
-							Tag:      "writecache",
-							Weight:   toPtr(5),
-							LimitOps: toPtr(25000),
-						},
-						{
-							Tag:      "policer",
-							Weight:   toPtr(5),
-							LimitOps: toPtr(25000),
-						},
-					})
-				require.ElementsMatch(t, writeLimits.Tags,
-					[]limitsconfig.IOTagConfig{
-						{
-							Tag:         "internal",
-							Weight:      toPtr(200),
-							ReservedOps: toPtr(100),
-							LimitOps:    toPtr(0),
-						},
-						{
-							Tag:         "client",
-							Weight:      toPtr(700),
-							ReservedOps: toPtr(1000),
-						},
-						{
-							Tag:         "background",
-							Weight:      toPtr(50),
-							LimitOps:    toPtr(1000),
-							ReservedOps: toPtr(0),
-						},
-						{
-							Tag:      "writecache",
-							Weight:   toPtr(50),
-							LimitOps: toPtr(2500),
-						},
-						{
-							Tag:      "policer",
-							Weight:   toPtr(50),
-							LimitOps: toPtr(2500),
-						},
-					})
 			case 1:
 				require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
 				require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -257,17 +188,6 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, true, sc.RefillMetabase())
 				require.Equal(t, mode.ReadWrite, sc.Mode())
 				require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
-
-				readLimits := limits.Read()
-				writeLimits := limits.Write()
-				require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
-				require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
-				require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
-				require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
-				require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
-				require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
-				require.Equal(t, 0, len(readLimits.Tags))
-				require.Equal(t, 0, len(writeLimits.Tags))
 			}
 			return nil
 		})
@@ -281,7 +201,3 @@ func TestEngineSection(t *testing.T) {
 		configtest.ForEnvFileType(t, path, fileConfigTest)
 	})
 }
-
-func toPtr(v float64) *float64 {
-	return &v
-}

@@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchDelay() time.Duration {
 	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	return max(d, 0)
+	if d < 0 {
+		d = 0
+	}
+	return d
 }

 // MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchSize() int {
 	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	return max(s, 0)
+	if s < 0 {
+		s = 0
+	}
+	return s
 }

 // NoSync returns the value of "no_sync" config parameter.
@@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
 // Returns 0 if the value is not a positive number.
 func (x *Config) PageSize() int {
 	s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
-	return max(s, 0)
+	if s < 0 {
+		s = 0
+	}
+	return s
 }
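
Both sides of these hunks clamp a non-positive config value to zero: max(d, 0) uses the generic max builtin available since Go 1.21, while the explicit if spells out the same clamp without relying on a newer toolchain. The two forms are interchangeable, including for time.Duration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        d := -5 * time.Second
        // Go 1.21+ builtin: works for any ordered type, including time.Duration.
        fmt.Println(max(d, 0)) // 0s
        // Pre-1.21 spelling of the same clamp.
        if d < 0 {
            d = 0
        }
        fmt.Println(d) // 0s
    }
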
@@ -4,7 +4,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
-	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@@ -126,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config {
 	)
 }

-// Limits returns "limits" subsection as a limitsconfig.Config.
-func (x *Config) Limits() *limitsconfig.Config {
-	return limitsconfig.From(
-		(*config.Config)(x).
-			Sub("limits"),
-	)
-}
-
 // RefillMetabase returns the value of "resync_metabase" config parameter.
 //
 // Returns false if the value is not a valid bool.

@@ -1,130 +0,0 @@
-package limits
-
-import (
-	"math"
-	"strconv"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	"github.com/spf13/cast"
-)
-
-const (
-	NoLimit            int64 = math.MaxInt64
-	DefaultIdleTimeout       = 5 * time.Minute
-)
-
-// From wraps config section into Config.
-func From(c *config.Config) *Config {
-	return (*Config)(c)
-}
-
-// Config is a wrapper over the config section
-// which provides access to Shard's limits configurations.
-type Config config.Config
-
-// Read returns the value of "read" limits config section.
-func (x *Config) Read() OpConfig {
-	return x.parse("read")
-}
-
-// Write returns the value of "write" limits config section.
-func (x *Config) Write() OpConfig {
-	return x.parse("write")
-}
-
-func (x *Config) parse(sub string) OpConfig {
-	c := (*config.Config)(x).Sub(sub)
-	var result OpConfig
-
-	if s := config.Int(c, "max_waiting_ops"); s > 0 {
-		result.MaxWaitingOps = s
-	} else {
-		result.MaxWaitingOps = NoLimit
-	}
-
-	if s := config.Int(c, "max_running_ops"); s > 0 {
-		result.MaxRunningOps = s
-	} else {
-		result.MaxRunningOps = NoLimit
-	}
-
-	if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
-		result.IdleTimeout = s
-	} else {
-		result.IdleTimeout = DefaultIdleTimeout
-	}
-
-	result.Tags = tags(c)
-
-	return result
-}
-
-type OpConfig struct {
-	// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
-	//
-	// Equals NoLimit if the value is not a positive number.
-	MaxWaitingOps int64
-	// MaxRunningOps returns the value of "max_running_ops" config parameter.
-	//
-	// Equals NoLimit if the value is not a positive number.
-	MaxRunningOps int64
-	// IdleTimeout returns the value of "idle_timeout" config parameter.
-	//
-	// Equals DefaultIdleTimeout if the value is not a valid duration.
-	IdleTimeout time.Duration
-	// Tags returns the value of "tags" config parameter.
-	//
-	// Equals nil if the value is not a valid tags config slice.
-	Tags []IOTagConfig
-}
-
-type IOTagConfig struct {
-	Tag         string
-	Weight      *float64
-	LimitOps    *float64
-	ReservedOps *float64
-}
-
-func tags(c *config.Config) []IOTagConfig {
-	c = c.Sub("tags")
-	var result []IOTagConfig
-	for i := 0; ; i++ {
-		tag := config.String(c, strconv.Itoa(i)+".tag")
-		if tag == "" {
-			return result
-		}
-
-		var tagConfig IOTagConfig
-		tagConfig.Tag = tag
-
-		v := c.Value(strconv.Itoa(i) + ".weight")
-		if v != nil {
-			w, err := cast.ToFloat64E(v)
-			panicOnErr(err)
-			tagConfig.Weight = &w
-		}
-
-		v = c.Value(strconv.Itoa(i) + ".limit_ops")
-		if v != nil {
-			l, err := cast.ToFloat64E(v)
-			panicOnErr(err)
-			tagConfig.LimitOps = &l
-		}
-
-		v = c.Value(strconv.Itoa(i) + ".reserved_ops")
-		if v != nil {
-			r, err := cast.ToFloat64E(v)
-			panicOnErr(err)
-			tagConfig.ReservedOps = &r
-		}
-
-		result = append(result, tagConfig)
-	}
-}
-
-func panicOnErr(err error) {
-	if err != nil {
-		panic(err)
-	}
-}

@@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchDelay() time.Duration {
 	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	return max(d, 0)
+	if d <= 0 {
+		d = 0
+	}
+	return d
 }

 // MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchSize() int {
 	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	return max(s, 0)
+	if s <= 0 {
+		s = 0
+	}
+	return s
 }

@@ -21,6 +21,10 @@ const (

 	putSubsection = "put"
 	getSubsection = "get"
+
+	// PutPoolSizeDefault is a default value of routine pool size to
+	// process object.Put requests in object service.
+	PutPoolSizeDefault = 10
 )

 // Put returns structure that provides access to "put" subsection of
@@ -31,6 +35,30 @@ func Put(c *config.Config) PutConfig {
 	}
 }

+// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeRemote() int {
+	v := config.Int(g.cfg, "remote_pool_size")
+	if v > 0 {
+		return int(v)
+	}
+
+	return PutPoolSizeDefault
+}
+
+// PoolSizeLocal returns the value of "local_pool_size" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeLocal() int {
+	v := config.Int(g.cfg, "local_pool_size")
+	if v > 0 {
+		return int(v)
+	}
+
+	return PutPoolSizeDefault
+}
+
 // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if is not defined.
 func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
 	return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")

@@ -13,6 +13,8 @@ func TestObjectSection(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		empty := configtest.EmptyConfig()

+		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
+		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
 		require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
 		require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
 	})
@@ -20,6 +22,8 @@ func TestObjectSection(t *testing.T) {
 	const path = "../../../../config/example/node"

 	fileConfigTest := func(c *config.Config) {
+		require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
+		require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
 		require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
 		require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
 	}

@@ -11,8 +11,6 @@ const (

 	// PutTimeoutDefault is a default timeout of object put request in replicator.
 	PutTimeoutDefault = 5 * time.Second
-	// PoolSizeDefault is a default pool size for put request in replicator.
-	PoolSizeDefault = 10
 )

 // PutTimeout returns the value of "put_timeout" config parameter
@@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration {

 // PoolSize returns the value of "pool_size" config parameter
 // from "replicator" section.
-//
-// Returns PoolSizeDefault if the value is non-positive integer.
 func PoolSize(c *config.Config) int {
-	v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
-	if v > 0 {
-		return v
-	}
-
-	return PoolSizeDefault
+	return int(config.IntSafe(c.Sub(subsection), "pool_size"))
 }
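
With PoolSizeDefault removed, replicatorconfig.PoolSize now reports the raw configured value (zero when unset) and defaulting becomes the caller's job, as in the initObjectPool hunk earlier where a non-positive replicator pool size falls back to the remote put capacity. The pattern in isolation:

    package main

    import "fmt"

    // rawPoolSize mimics the new PoolSize: it just reports what the config
    // holds, with 0 meaning "not set".
    func rawPoolSize(configured int) int { return configured }

    func main() {
        putRemoteCapacity := 10

        replicatorPoolSize := rawPoolSize(0) // unset in config
        if replicatorPoolSize <= 0 {
            // Caller-side fallback replaces the deleted package-level default.
            replicatorPoolSize = putRemoteCapacity
        }
        fmt.Println(replicatorPoolSize) // 10
    }
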
@@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
 		empty := configtest.EmptyConfig()

 		require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
-		require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
+		require.Equal(t, 0, replicatorconfig.PoolSize(empty))
 	})

 	const path = "../../../../config/example/node"

@@ -1,42 +0,0 @@
-package rpcconfig
-
-import (
-	"strconv"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-)
-
-const (
-	subsection       = "rpc"
-	limitsSubsection = "limits"
-)
-
-type LimitConfig struct {
-	Methods []string
-	MaxOps  int64
-}
-
-// Limits returns the "limits" config from "rpc" section.
-func Limits(c *config.Config) []LimitConfig {
-	c = c.Sub(subsection).Sub(limitsSubsection)
-
-	var limits []LimitConfig
-
-	for i := uint64(0); ; i++ {
-		si := strconv.FormatUint(i, 10)
-		sc := c.Sub(si)
-
-		methods := config.StringSliceSafe(sc, "methods")
-		if len(methods) == 0 {
-			break
-		}
-
-		if sc.Value("max_ops") == nil {
-			panic("no max operations for method group")
-		}
-
-		limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
-	}
-
-	return limits
-}

@@ -1,77 +0,0 @@
-package rpcconfig
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestRPCSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		require.Empty(t, Limits(configtest.EmptyConfig()))
-	})
-
-	t.Run("correct config", func(t *testing.T) {
-		const path = "../../../../config/example/node"
-
-		fileConfigTest := func(c *config.Config) {
-			limits := Limits(c)
-			require.Len(t, limits, 2)
-
-			limit0 := limits[0]
-			limit1 := limits[1]
-
-			require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
-			require.Equal(t, limit0.MaxOps, int64(1000))
-
-			require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
-			require.Equal(t, limit1.MaxOps, int64(10000))
-		}
-
-		configtest.ForEachFileType(path, fileConfigTest)
-
-		t.Run("ENV", func(t *testing.T) {
-			configtest.ForEnvFileType(t, path, fileConfigTest)
-		})
-	})
-
-	t.Run("no max operations", func(t *testing.T) {
-		const path = "testdata/no_max_ops"
-
-		fileConfigTest := func(c *config.Config) {
-			require.Panics(t, func() { _ = Limits(c) })
-		}
-
-		configtest.ForEachFileType(path, fileConfigTest)
-
-		t.Run("ENV", func(t *testing.T) {
-			configtest.ForEnvFileType(t, path, fileConfigTest)
-		})
-	})
-
-	t.Run("zero max operations", func(t *testing.T) {
-		const path = "testdata/zero_max_ops"
-
-		fileConfigTest := func(c *config.Config) {
-			limits := Limits(c)
-			require.Len(t, limits, 2)
-
-			limit0 := limits[0]
-			limit1 := limits[1]
-
-			require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
-			require.Equal(t, limit0.MaxOps, int64(0))
-
-			require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
-			require.Equal(t, limit1.MaxOps, int64(10000))
-		}
-
-		configtest.ForEachFileType(path, fileConfigTest)
-
-		t.Run("ENV", func(t *testing.T) {
-			configtest.ForEnvFileType(t, path, fileConfigTest)
-		})
-	})
-}

@@ -1,3 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

@@ -1,18 +0,0 @@
-{
-  "rpc": {
-    "limits": [
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/PutSingle",
-          "/neo.fs.v2.object.ObjectService/Put"
-        ]
-      },
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/Get"
-        ],
-        "max_ops": 10000
-      }
-    ]
-  }
-}

@@ -1,8 +0,0 @@
-rpc:
-  limits:
-    - methods:
-        - /neo.fs.v2.object.ObjectService/PutSingle
-        - /neo.fs.v2.object.ObjectService/Put
-    - methods:
-        - /neo.fs.v2.object.ObjectService/Get
-      max_ops: 10000

@@ -1,4 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=0
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

@@ -1,19 +0,0 @@
-{
-  "rpc": {
-    "limits": [
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/PutSingle",
-          "/neo.fs.v2.object.ObjectService/Put"
-        ],
-        "max_ops": 0
-      },
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/Get"
-        ],
-        "max_ops": 10000
-      }
-    ]
-  }
-}

@@ -1,9 +0,0 @@
-rpc:
-  limits:
-    - methods:
-        - /neo.fs.v2.object.ObjectService/PutSingle
-        - /neo.fs.v2.object.ObjectService/Put
-      max_ops: 0
-    - methods:
-        - /neo.fs.v2.object.ObjectService/Get
-      max_ops: 10000

@@ -4,18 +4,14 @@ import (
 	"context"
 	"crypto/tls"
 	"errors"
-	"fmt"
 	"net"
 	"time"

 	grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
-	rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
 	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
 	qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
@@ -138,13 +134,11 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
 			qos.NewUnaryServerInterceptor(),
 			metrics.NewUnaryServerInterceptor(),
 			tracing.NewUnaryServerInterceptor(),
-			qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
 		),
 		grpc.ChainStreamInterceptor(
 			qos.NewStreamServerInterceptor(),
 			metrics.NewStreamServerInterceptor(),
 			tracing.NewStreamServerInterceptor(),
-			qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
 		),
 	}

@@ -233,54 +227,3 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger

 	l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
-
-func initRPCLimiter(c *cfg) error {
-	var limits []limiting.KeyLimit
-	for _, l := range rpcconfig.Limits(c.appCfg) {
-		limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
-	}
-
-	if err := validateRPCLimits(c, limits); err != nil {
-		return fmt.Errorf("validate RPC limits: %w", err)
-	}
-
-	limiter, err := limiting.NewSemaphoreLimiter(limits)
-	if err != nil {
-		return fmt.Errorf("create RPC limiter: %w", err)
-	}
-
-	c.cfgGRPC.limiter.Store(limiter)
-	return nil
-}
-
-func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
-	availableMethods := getAvailableMethods(c.cfgGRPC.servers)
-	for _, limit := range limits {
-		for _, method := range limit.Keys {
-			if _, ok := availableMethods[method]; !ok {
-				return fmt.Errorf("set limit on an unknown method %q", method)
-			}
-		}
-	}
-	return nil
-}
-
-func getAvailableMethods(servers []grpcServer) map[string]struct{} {
-	res := make(map[string]struct{})
-	for _, server := range servers {
-		for _, method := range getMethodsForServer(server.Server) {
-			res[method] = struct{}{}
-		}
-	}
-	return res
-}
-
-func getMethodsForServer(server *grpc.Server) []string {
-	var res []string
-	for service, info := range server.GetServiceInfo() {
-		for _, method := range info.Methods {
-			res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
-		}
-	}
-	return res
-}
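
The removed initRPCLimiter built per-method concurrency limits from config and stored a semaphore-style limiter that the deleted interceptor lines consulted on every call. A generic, self-contained sketch of the idea (not the frostfs-qos API): a buffered channel caps in-flight unary RPCs and rejects the overflow. It would be wired with grpc.NewServer(grpc.ChainUnaryInterceptor(maxActiveUnaryInterceptor(1000))).

    package main

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // maxActiveUnaryInterceptor caps in-flight unary RPCs with a channel used
    // as a counting semaphore; calls over the limit fail fast.
    func maxActiveUnaryInterceptor(limit int) grpc.UnaryServerInterceptor {
        sem := make(chan struct{}, limit)
        return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
            select {
            case sem <- struct{}{}:
                defer func() { <-sem }()
                return handler(ctx, req)
            default:
                return nil, status.Error(codes.ResourceExhausted, "too many active requests")
            }
        }
    }
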
@@ -117,8 +117,6 @@ func initApp(ctx context.Context, c *cfg) {
 	initAndLog(ctx, c, "apemanager", initAPEManagerService)
 	initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })

-	initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
-
 	initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
 }

@@ -16,6 +16,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
 	objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
 	objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+	v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
 	objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
 	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
 	deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@@ -171,10 +172,12 @@ func initObjectService(c *cfg) {

 	splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)

-	apeSvc := createAPEService(c, &irFetcher, splitSvc)
+	apeSvc := createAPEService(c, splitSvc)
+
+	aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)

 	var commonSvc objectService.Common
-	commonSvc.Init(&c.internals, apeSvc)
+	commonSvc.Init(&c.internals, aclSvc)

 	respSvc := objectService.NewResponseService(
 		&commonSvc,
@@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 	})
 }

-func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
+func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
 	return &innerRingFetcherWithNotary{
 		sidechain: c.cfgMorph.client,
 	}
@@ -323,6 +326,7 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
 		c,
 		c.cfgNetmap.state,
 		irFetcher,
+		objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
 		objectwriter.WithLogger(c.log),
 		objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
 	)
@@ -426,7 +430,17 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
 	)
 }

-func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
+func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
+	return v2.New(
+		apeSvc,
+		c.netMapSource,
+		irFetcher,
+		c.cfgObject.cnrSource,
+		v2.WithLogger(c.log),
+	)
+}
+
+func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
 	return objectAPE.NewService(
 		objectAPE.NewChecker(
 			c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
@@ -438,7 +452,6 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
 			c.cfgObject.cnrSource,
 			c.binPublicKey,
 		),
-		objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
 		splitSvc,
 	)
 }
@@ -451,7 +464,7 @@ func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Ad
 	return e.engine.IsLocked(ctx, address)
 }

-func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
+func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID, expEpoch uint64) error {
 	var prm engine.InhumePrm

 	addrs := make([]oid.Address, len(toDelete))
@@ -460,7 +473,7 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
 		addrs[i].SetObject(toDelete[i])
 	}

-	prm.WithTarget(tombstone, addrs...)
+	prm.WithTarget(tombstone, expEpoch, addrs...)

 	return e.engine.Inhume(ctx, prm)
 }
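
The object.go hunks reorder the request pipeline: this branch places an ACL v2 service in front of the APE service, which in turn wraps the transport splitter, and commonSvc is initialized with the outermost layer. That layering is an ordinary decorator chain; schematically (all types below are illustrative, not the frostfs service interfaces):

    package main

    import "fmt"

    // Handler is a stand-in for one object-service stage.
    type Handler interface{ Handle(req string) string }

    type splitter struct{}

    func (splitter) Handle(req string) string { return "split(" + req + ")" }

    // ape and acl wrap an inner handler, decorator-style.
    type ape struct{ next Handler }

    func (a ape) Handle(req string) string { return "ape(" + a.next.Handle(req) + ")" }

    type acl struct{ next Handler }

    func (a acl) Handle(req string) string { return "acl(" + a.next.Handle(req) + ")" }

    func main() {
        // Branch ordering: ACL checks run before APE, which wraps the splitter.
        chain := acl{next: ape{next: splitter{}}}
        fmt.Println(chain.Handle("get")) // acl(ape(split(get)))
    }
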
@@ -43,14 +43,11 @@ func initQoSService(c *cfg) {
 func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
 	rawTag, defined := qosTagging.IOTagFromContext(ctx)
 	if !defined {
-		if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
-			return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
-		}
 		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
 	}
 	ioTag, err := qos.FromRawString(rawTag)
 	if err != nil {
-		s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
+		s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
 		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
 	}

@@ -73,36 +70,26 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
 				return ctx
 			}
 		}
-		s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
 		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
 	case qos.IOTagInternal:
-		if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
-			return ctx
+		for _, pk := range s.allowedInternalPubs {
+			if bytes.Equal(pk, requestSignPublicKey) {
+				return ctx
+			}
+		}
+		nm, err := s.netmapSource.GetNetMap(ctx, 0)
+		if err != nil {
+			s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
+			return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
+		}
+		for _, node := range nm.Nodes() {
+			if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
+				return ctx
+			}
 		}
-		s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
 		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
 	default:
-		s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
+		s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
 		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
 	}
 }
-
-func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
-	for _, pk := range s.allowedInternalPubs {
-		if bytes.Equal(pk, publicKey) {
-			return true
-		}
-	}
-	nm, err := s.netmapSource.GetNetMap(ctx, 0)
-	if err != nil {
-		s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
-		return false
-	}
-	for _, node := range nm.Nodes() {
-		if bytes.Equal(node.PublicKey(), publicKey) {
-			return true
-		}
-	}
-
-	return false
-}
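
The qos.go hunk inlines the allowed-key and netmap-node scans that master factors into isInternalIOTagPublicKey. Either way the core operation is byte-slice membership, which can be expressed compactly with the standard library (an illustrative helper, not code from either side of the diff):

    package main

    import (
        "bytes"
        "fmt"
        "slices"
    )

    // containsKey reports whether pubKey equals any of the allowed keys.
    func containsKey(allowed [][]byte, pubKey []byte) bool {
        return slices.ContainsFunc(allowed, func(k []byte) bool {
            return bytes.Equal(k, pubKey)
        })
    }

    func main() {
        allowed := [][]byte{{0x02, 0xaa}}
        fmt.Println(containsKey(allowed, []byte{0x02, 0xaa})) // true
    }
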
@ -1,226 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
|
|
||||||
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestQoSService_Client(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
s, pk := testQoSServicePrepare(t)
|
|
||||||
t.Run("IO tag client defined", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
|
|
||||||
ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
|
|
||||||
ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
|
|
||||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
|
||||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
|
||||||
tag, ok := tagging.IOTagFromContext(ctx)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
|
||||||
})
|
|
||||||
t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
|
|
||||||
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
		ctx = s.AdjustIncomingTag(ctx, pk.Internal)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagClient.String(), tag)
	})
}

func TestQoSService_Internal(t *testing.T) {
	t.Parallel()
	s, pk := testQoSServicePrepare(t)
	t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
		ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagInternal.String(), tag)
	})
	t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
		ctx = s.AdjustIncomingTag(ctx, pk.Internal)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagInternal.String(), tag)
	})
	t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
		ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagInternal.String(), tag)
	})
	t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
		ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagInternal.String(), tag)
	})
}

func TestQoSService_Critical(t *testing.T) {
	t.Parallel()
	s, pk := testQoSServicePrepare(t)
	t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
		ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagCritical.String(), tag)
	})
	t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
		ctx = s.AdjustIncomingTag(ctx, pk.Critical)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagCritical.String(), tag)
	})
}

func TestQoSService_NetmapGetError(t *testing.T) {
	t.Parallel()
	s, pk := testQoSServicePrepare(t)
	s.netmapSource = &utilTesting.TestNetmapSource{}
	t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
		ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagClient.String(), tag)
	})
	t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
		ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagClient.String(), tag)
	})
	t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
		ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagClient.String(), tag)
	})
	t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
		ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
		ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
		tag, ok := tagging.IOTagFromContext(ctx)
		require.True(t, ok)
		require.Equal(t, qos.IOTagClient.String(), tag)
	})
}

func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
	nmSigner, err := keys.NewPrivateKey()
	require.NoError(t, err)

	reqSigner, err := keys.NewPrivateKey()
	require.NoError(t, err)

	allowedCritSigner, err := keys.NewPrivateKey()
	require.NoError(t, err)

	allowedIntSigner, err := keys.NewPrivateKey()
	require.NoError(t, err)

	var node netmap.NodeInfo
	node.SetPublicKey(nmSigner.PublicKey().Bytes())
	nm := &netmap.NetMap{}
	nm.SetEpoch(100)
	nm.SetNodes([]netmap.NodeInfo{node})

	return &cfgQoSService{
			logger: test.NewLogger(t),
			netmapSource: &utilTesting.TestNetmapSource{
				Netmaps: map[uint64]*netmap.NetMap{
					100: nm,
				},
				CurrentEpoch: 100,
			},
			allowedCriticalPubs: [][]byte{
				allowedCritSigner.PublicKey().Bytes(),
			},
			allowedInternalPubs: [][]byte{
				allowedIntSigner.PublicKey().Bytes(),
			},
		},
		&testQoSServicePublicKeys{
			NetmapNode: nmSigner.PublicKey().Bytes(),
			Request:    reqSigner.PublicKey().Bytes(),
			Internal:   allowedIntSigner.PublicKey().Bytes(),
			Critical:   allowedCritSigner.PublicKey().Bytes(),
		}
}

type testQoSServicePublicKeys struct {
	NetmapNode []byte
	Request    []byte
	Internal   []byte
	Critical   []byte
}
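The deleted tests above pin down a single acceptance rule for incoming IO tags. As a minimal sketch of that rule (all identifiers here are invented for illustration; the real service keeps a separate allow-list per tag):

```go
package main

import "fmt"

// adjustIncomingTag mirrors the behaviour the tests assert: a privileged tag
// survives only when the request signer is a netmap node or on the matching
// allow-list; unknown tags, unauthorized signers and netmap lookup failures
// all degrade to the public "client" tag.
func adjustIncomingTag(requested string, signerIsNetmapNode, signerOnTagAllowList bool) string {
	switch requested {
	case "internal", "critical":
		if signerIsNetmapNode || signerOnTagAllowList {
			return requested
		}
	}
	return "client"
}

func main() {
	fmt.Println(adjustIncomingTag("critical", false, false)) // client
	fmt.Println(adjustIncomingTag("internal", true, false))  // internal
}
```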
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"os"
 	"path/filepath"
 	"testing"
 
@@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
 			require.NoError(t, err)
 		})
 	})
+
+	t.Run("mainnet", func(t *testing.T) {
+		os.Clearenv() // ENVs have priority over config files, so we do this in tests
+		p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
+		c := config.New(p, "", config.EnvPrefix)
+		require.NoError(t, validateConfig(c))
+	})
+	t.Run("testnet", func(t *testing.T) {
+		os.Clearenv() // ENVs have priority over config files, so we do this in tests
+		p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
+		c := config.New(p, "", config.EnvPrefix)
+		require.NoError(t, validateConfig(c))
+	})
 }
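With the mainnet and testnet example configs restored, these new subtests run under plain `go test`; for instance (the package path is assumed from the repository layout):

```
go test ./cmd/frostfs-node -run TestValidate
```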
@@ -87,16 +87,14 @@ FROSTFS_REPLICATOR_POOL_SIZE=10
 FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
 
 # Object service section
+FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
+FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
 FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
 FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
 
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
-
 # Storage engine section
+FROSTFS_STORAGE_SHARD_POOL_SIZE=15
 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
 ## 0 shard
 ### Flag to refill Metabase from BlobStor

@@ -156,47 +154,6 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
 #### Limit of concurrent workers collecting expired objects by the garbage collector
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
-#### Limits config
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
-FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
 
 ## 1 shard
 ### Flag to refill Metabase from BlobStor
@@ -134,30 +134,16 @@
       "tombstone_lifetime": 10
     },
     "put": {
+      "remote_pool_size": 100,
+      "local_pool_size": 200,
       "skip_session_token_issuer_verification": true
     },
     "get": {
       "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
     }
   },
-  "rpc": {
-    "limits": [
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/PutSingle",
-          "/neo.fs.v2.object.ObjectService/Put"
-        ],
-        "max_ops": 1000
-      },
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/Get"
-        ],
-        "max_ops": 10000
-      }
-    ]
-  },
   "storage": {
+    "shard_pool_size": 15,
     "shard_ro_error_threshold": 100,
     "shard": {
       "0": {

@@ -220,76 +206,6 @@
         "remover_sleep_interval": "2m",
         "expired_collector_batch_size": 1500,
         "expired_collector_worker_count": 15
-        },
-        "limits": {
-          "read": {
-            "max_running_ops": 10000,
-            "max_waiting_ops": 1000,
-            "idle_timeout": "30s",
-            "tags": [
-              {
-                "tag": "internal",
-                "weight": 20,
-                "limit_ops": 0,
-                "reserved_ops": 1000
-              },
-              {
-                "tag": "client",
-                "weight": 70,
-                "reserved_ops": 10000
-              },
-              {
-                "tag": "background",
-                "weight": 5,
-                "limit_ops": 10000,
-                "reserved_ops": 0
-              },
-              {
-                "tag": "writecache",
-                "weight": 5,
-                "limit_ops": 25000
-              },
-              {
-                "tag": "policer",
-                "weight": 5,
-                "limit_ops": 25000
-              }
-            ]
-          },
-          "write": {
-            "max_running_ops": 1000,
-            "max_waiting_ops": 100,
-            "idle_timeout": "45s",
-            "tags": [
-              {
-                "tag": "internal",
-                "weight": 200,
-                "limit_ops": 0,
-                "reserved_ops": 100
-              },
-              {
-                "tag": "client",
-                "weight": 700,
-                "reserved_ops": 1000
-              },
-              {
-                "tag": "background",
-                "weight": 50,
-                "limit_ops": 1000,
-                "reserved_ops": 0
-              },
-              {
-                "tag": "writecache",
-                "weight": 50,
-                "limit_ops": 2500
-              },
-              {
-                "tag": "policer",
-                "weight": 50,
-                "limit_ops": 2500
-              }
-            ]
-          }
         }
       },
       "1": {
@@ -117,24 +117,17 @@ object:
   delete:
     tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
   put:
+    remote_pool_size: 100 # number of async workers for remote PUT operations
+    local_pool_size: 200 # number of async workers for local PUT operations
     skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
   get:
     priority: # list of metrics of nodes for prioritization
       - $attribute:ClusterName
       - $attribute:UN-LOCODE
 
-rpc:
-  limits:
-    - methods:
-        - /neo.fs.v2.object.ObjectService/PutSingle
-        - /neo.fs.v2.object.ObjectService/Put
-      max_ops: 1000
-    - methods:
-        - /neo.fs.v2.object.ObjectService/Get
-      max_ops: 10000
-
 storage:
   # note: shard configuration can be omitted for relay node (see `node.relay`)
+  shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
   shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
 
   shard:

@@ -226,52 +219,6 @@ storage:
       expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
       expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
-
-      limits:
-        read:
-          max_running_ops: 10000
-          max_waiting_ops: 1000
-          idle_timeout: 30s
-          tags:
-            - tag: internal
-              weight: 20
-              limit_ops: 0
-              reserved_ops: 1000
-            - tag: client
-              weight: 70
-              reserved_ops: 10000
-            - tag: background
-              weight: 5
-              limit_ops: 10000
-              reserved_ops: 0
-            - tag: writecache
-              weight: 5
-              limit_ops: 25000
-            - tag: policer
-              weight: 5
-              limit_ops: 25000
-        write:
-          max_running_ops: 1000
-          max_waiting_ops: 100
-          idle_timeout: 45s
-          tags:
-            - tag: internal
-              weight: 200
-              limit_ops: 0
-              reserved_ops: 100
-            - tag: client
-              weight: 700
-              reserved_ops: 1000
-            - tag: background
-              weight: 50
-              limit_ops: 1000
-              reserved_ops: 0
-            - tag: writecache
-              weight: 50
-              limit_ops: 2500
-            - tag: policer
-              weight: 50
-              limit_ops: 2500
 
     1:
       writecache:
         path: tmp/1/cache # write-cache root directory
config/mainnet/README.md (new file, +28 lines)
@@ -0,0 +1,28 @@
+# N3 Mainnet Storage node configuration
+
+Here is a template for simple storage node configuration in N3 Mainnet.
+Make sure to specify correct values instead of `<...>` placeholders.
+Do not change the `contracts` section. Run the latest frostfs-node release with
+the fixed config: `frostfs-node -c config.yml`.
+
+To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract.
+The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
+(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
+
+## Tips
+
+Use `grpcs://` scheme in the announced address if you enable TLS in grpc server.
+```yaml
+node:
+  addresses:
+    - grpcs://frostfs.my.org:8080
+
+grpc:
+  num: 1
+  0:
+    endpoint: frostfs.my.org:8080
+    tls:
+      enabled: true
+      certificate: /path/to/cert
+      key: /path/to/key
+```
config/mainnet/config.yml (new file, +70 lines)
@@ -0,0 +1,70 @@
+node:
+  wallet:
+    path: <path/to/wallet>
+    address: <address-in-wallet>
+    password: <password>
+  addresses:
+    - <announced.address:port>
+  attribute_0: UN-LOCODE:<XX YYY>
+  attribute_1: Price:100000
+  attribute_2: User-Agent:FrostFS\/0.9999
+
+grpc:
+  num: 1
+  0:
+    endpoint: <listen.local.address:port>
+    tls:
+      enabled: false
+
+storage:
+  shard_num: 1
+  shard:
+    0:
+      metabase:
+        path: /storage/path/metabase
+        perm: 0600
+      blobstor:
+        - path: /storage/path/blobovnicza
+          type: blobovnicza
+          perm: 0600
+          opened_cache_capacity: 32
+          depth: 1
+          width: 1
+        - path: /storage/path/fstree
+          type: fstree
+          perm: 0600
+          depth: 4
+      writecache:
+        enabled: false
+      gc:
+        remover_batch_size: 100
+        remover_sleep_interval: 1m
+
+logger:
+  level: info
+
+prometheus:
+  enabled: true
+  address: localhost:9090
+  shutdown_timeout: 15s
+
+object:
+  put:
+    remote_pool_size: 100
+    local_pool_size: 100
+
+morph:
+  rpc_endpoint:
+    - wss://rpc1.morph.frostfs.info:40341/ws
+    - wss://rpc2.morph.frostfs.info:40341/ws
+    - wss://rpc3.morph.frostfs.info:40341/ws
+    - wss://rpc4.morph.frostfs.info:40341/ws
+    - wss://rpc5.morph.frostfs.info:40341/ws
+    - wss://rpc6.morph.frostfs.info:40341/ws
+    - wss://rpc7.morph.frostfs.info:40341/ws
+  dial_timeout: 20s
+
+contracts:
+  balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
+  container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
+  netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
config/testnet/README.md (new file, +129 lines)
@@ -0,0 +1,129 @@
+# N3 Testnet Storage node configuration
+
+There is a prepared configuration for NeoFS Storage Node deployment in
+N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
+docker image and run it with docker-compose.
+
+## Build image
+
+Prepared **frostfs-storage-testnet** image is available at Docker Hub.
+However, if you need to rebuild it for some reason, run the
+`make image-storage-testnet` command.
+
+```
+$ make image-storage-testnet
+...
+Successfully built ab0557117b02
+Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
+```
+
+## Deploy node
+
+To run a storage node in N3 Testnet environment, you should deposit GAS assets,
+update docker-compose file and start the node.
+
+### Deposit
+
+The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a
+bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx.
+
+First, obtain GAS in N3 Testnet chain. You can do that with
+[faucet](https://neowish.ngd.network) service.
+
+Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet.
+You can provide scripthash in the `data` argument of transfer tx to make a
+deposit to a specified account. Otherwise, deposit is made to the tx sender.
+
+NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
+so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
+
+See a deposit example with `neo-go`.
+
+```
+neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
+--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
+--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
+--token GAS \
+--amount 1
+```
+
+### Configure
+
+Next, configure `node_config.env` file. Change endpoints values. Both
+should contain your **public** IP.
+
+```
+NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
+NEOFS_NODE_ADDRESSES=65.52.183.157:36512
+```
+
+Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
+attribute.
+
+```
+NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
+NEOFS_NODE_ADDRESSES=65.52.183.157:36512
+NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
+```
+
+You can validate UN/LOCODE attribute in
+[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
+with frostfs-cli.
+
+```
+$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
+Country: Russia
+Location: Saint Petersburg (ex Leningrad)
+Continent: Europe
+Subdivision: [SPE] Sankt-Peterburg
+Coordinates: 59.53, 30.15
+```
+
+It is recommended to pass the node's key as a file. To do so, convert your wallet
+WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.
+
+```
+// Print WIF in a 32-byte hex format
+$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
+PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
+PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
+WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
+Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
+ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
+ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
+
+// Save 32-byte hex into a file
+$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
+```
+
+Then, specify the path to this file in `docker-compose.yml`
+```yaml
+volumes:
+  - frostfs_storage:/storage
+  - ./my_wallet.key:/node.key
+```
+
+NeoFS objects will be stored on your machine. By default, docker-compose
+is configured to store objects in named docker volume `frostfs_storage`. You can
+specify a directory on the filesystem to store objects there.
+
+```yaml
+volumes:
+  - /home/username/frostfs/rc3/storage:/storage
+  - ./my_wallet.key:/node.key
+```
+
+### Start
+
+Run the node with `docker-compose up` command and stop it with `docker-compose down`.
+
+### Debug
+
+To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
+log, set up log level to debug with this env:
+
+```yaml
+environment:
+  - NEOFS_LOGGER_LEVEL=debug
+```
config/testnet/config.yml (new file, +52 lines)
@@ -0,0 +1,52 @@
+logger:
+  level: info
+
+morph:
+  rpc_endpoint:
+    - wss://rpc01.morph.testnet.frostfs.info:51331/ws
+    - wss://rpc02.morph.testnet.frostfs.info:51331/ws
+    - wss://rpc03.morph.testnet.frostfs.info:51331/ws
+    - wss://rpc04.morph.testnet.frostfs.info:51331/ws
+    - wss://rpc05.morph.testnet.frostfs.info:51331/ws
+    - wss://rpc06.morph.testnet.frostfs.info:51331/ws
+    - wss://rpc07.morph.testnet.frostfs.info:51331/ws
+  dial_timeout: 20s
+
+contracts:
+  balance: e0420c216003747626670d1424569c17c79015bf
+  container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
+  netmap: d4b331639799e2958d4bc5b711b469d79de94e01
+
+node:
+  key: /node.key
+  attribute_0: Deployed:SelfHosted
+  attribute_1: User-Agent:FrostFS\/0.9999
+
+prometheus:
+  enabled: true
+  address: localhost:9090
+  shutdown_timeout: 15s
+
+storage:
+  shard_num: 1
+  shard:
+    0:
+      metabase:
+        path: /storage/metabase
+        perm: 0777
+      blobstor:
+        - path: /storage/path/blobovnicza
+          type: blobovnicza
+          perm: 0600
+          opened_cache_capacity: 32
+          depth: 1
+          width: 1
+        - path: /storage/path/fstree
+          type: fstree
+          perm: 0600
+          depth: 4
+      writecache:
+        enabled: false
+      gc:
+        remover_batch_size: 100
+        remover_sleep_interval: 1m
@@ -51,7 +51,10 @@ However, all mode changing operations are idempotent.

 ## Automatic mode changes
 
-A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
+A shard can automatically switch to `degraded-read-only` mode in 3 cases:
+1. If the metabase was not available or couldn't be opened/initialized during shard startup.
+2. If the shard error counter exceeds the threshold.
+3. If the metabase couldn't be reopened during SIGHUP handling.
 
 # Detach shard
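Both versions of the passage above describe threshold-driven mode changes. A minimal sketch of the error-counter mechanism, with invented names and the documented convention that a zero threshold disables the check:

```go
package main

import "fmt"

type shard struct {
	mode      string
	errCount  uint32
	threshold uint32 // 0 means "ignore errors", matching the config default
}

// reportError increments the counter and trips the shard into read-only
// mode once the configured threshold is reached.
func (s *shard) reportError() {
	s.errCount++
	if s.threshold > 0 && s.errCount >= s.threshold {
		s.mode = "read-only"
	}
}

func main() {
	s := shard{mode: "read-write", threshold: 3}
	for i := 0; i < 3; i++ {
		s.reportError()
	}
	fmt.Println(s.mode) // read-only
}
```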
@@ -170,6 +170,7 @@ Local storage engine configuration.

 | Parameter | Type | Default value | Description |
 |----------------------------|-----------------------------------|---------------|-----|
+| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
 | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
 | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
 | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |

@@ -194,7 +195,6 @@ The following table describes configuration for each shard.
 | `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
 | `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
 | `gc` | [GC config](#gc-subsection) | | GC configuration. |
-| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
 
 ### `blobstor` subsection

@@ -301,64 +301,6 @@ writecache:
 | `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
 | `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
 
-### `limits` subsection
-
-```yaml
-limits:
-  max_read_running_ops: 10000
-  max_read_waiting_ops: 1000
-  max_write_running_ops: 1000
-  max_write_waiting_ops: 100
-  read:
-    - tag: internal
-      weight: 20
-      limit_ops: 0
-      reserved_ops: 1000
-    - tag: client
-      weight: 70
-      reserved_ops: 10000
-    - tag: background
-      weight: 5
-      limit_ops: 10000
-      reserved_ops: 0
-    - tag: writecache
-      weight: 5
-      limit_ops: 25000
-    - tag: policer
-      weight: 5
-      limit_ops: 25000
-  write:
-    - tag: internal
-      weight: 200
-      limit_ops: 0
-      reserved_ops: 100
-    - tag: client
-      weight: 700
-      reserved_ops: 1000
-    - tag: background
-      weight: 50
-      limit_ops: 1000
-      reserved_ops: 0
-    - tag: writecache
-      weight: 50
-      limit_ops: 2500
-    - tag: policer
-      weight: 50
-      limit_ops: 2500
-```
-
-| Parameter | Type | Default value | Description |
-| ----------------------- | -------- | -------------- | ----- |
-| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
-| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
-| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
-| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
-| `read` | `[]tag` | empty | Array of shard read settings for tags. |
-| `write` | `[]tag` | empty | Array of shard write settings for tags. |
-| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
-| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or for none. |
-| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
-| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
 
 # `node` section
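As a worked reading of the removed sample values above: with read weights internal=20, client=70 and 5 each for background, writecache and policer (105 in total), a fully loaded shard grants client traffic roughly 70/105 ≈ 67% of read operations, while `reserved_ops: 1000` still guarantees internal readers that many operations per second even under client pressure.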
@@ -454,16 +396,18 @@ replicator:
   pool_size: 10
 ```
 
 | Parameter | Type | Default value | Description |
-|---------------|------------|---------------|---------------------------------------------|
+|---------------|------------|----------------------------------------|---------------------------------------------|
 | `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
+| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. |
 
 # `object` section
 Contains object-service related parameters.
 
 ```yaml
 object:
+  put:
+    remote_pool_size: 100
   get:
     priority:
       - $attribute:ClusterName
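One consequence of the restored default above, shown as an illustrative fragment (values are arbitrary): when `replicator.pool_size` is omitted, it follows the object PUT pool.

```yaml
object:
  put:
    remote_pool_size: 100   # replicator pool size inherits this value
replicator:
  put_timeout: 5s
  # pool_size omitted: defaults to object.put.remote_pool_size, i.e. 100
```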
@@ -472,29 +416,10 @@ object:
 | Parameter | Type | Default value | Description |
 |-----------------------------|------------|---------------|-----|
 | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
+| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
+| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
 | `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. |
 
-# `rpc` section
-Contains limits on the number of active RPCs for specified method(s).
-
-```yaml
-rpc:
-  limits:
-    - methods:
-        - /neo.fs.v2.object.ObjectService/PutSingle
-        - /neo.fs.v2.object.ObjectService/Put
-      max_ops: 1000
-    - methods:
-        - /neo.fs.v2.object.ObjectService/Get
-      max_ops: 10000
-```
-
-| Parameter | Type | Default value | Description |
-|------------------|------------|---------------|-----|
-| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s). |
-| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit. |
-
 # `runtime` section
 Contains runtime parameters.
go.mod
@@ -1,18 +1,18 @@
 module git.frostfs.info/TrueCloudLab/frostfs-node
 
-go 1.23
+go 1.22
 
 require (
 	code.gitea.io/sdk/gitea v0.17.1
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
 	git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
+	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
-	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167
+	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
 	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
-	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
+	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
 	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
 	github.com/VictoriaMetrics/easyproto v0.1.4
go.sum
@@ -1,25 +1,25 @@
 code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
 code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 h1:NhqfqNcATndYwx413BaaYXxVJbkeu2vQOtVyxXw5xCQ=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
-git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
+git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 h1:n7Pl8V7O1yS07J/fqdbzZjVe/mQW42a7eS0QHfgrzJw=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
 git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
 git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
 git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
 git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
@@ -1,9 +0,0 @@
-package assert
-
-import "strings"
-
-func True(cond bool, details ...string) {
-	if !cond {
-		panic(strings.Join(details, " "))
-	}
-}
@@ -125,6 +125,7 @@ const (
 	SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
 	SearchLocalOperationFailed = "local operation failed"
 	UtilObjectServiceError = "object service error"
+	UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
 	V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
 	V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
 	ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"

@@ -252,7 +253,8 @@ const (
 	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
 	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
 	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
-	ShardCouldNotFindObject = "could not find object"
+	ShardUnknownObjectTypeWhileIteratingExpiredObjects = "encountered unknown object type while iterating expired objects"
+	ShardFailedToRemoveExpiredGraves = "failed to remove expired graves"
 	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
 	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
 	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"

@@ -512,7 +514,5 @@ const (
 	FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
 	FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
 	NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
-	FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
-	FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
-	WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
+	FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
 )
@@ -23,7 +23,6 @@ const (
 	policerSubsystem = "policer"
 	commonCacheSubsystem = "common_cache"
 	multinetSubsystem = "multinet"
-	qosSubsystem = "qos"
 
 	successLabel = "success"
 	shardIDLabel = "shard_id"

@@ -44,7 +43,6 @@ const (
 	hitLabel = "hit"
 	cacheLabel = "cache"
 	sourceIPLabel = "source_ip"
-	ioTagLabel = "io_tag"
 
 	readWriteMode = "READ_WRITE"
 	readOnlyMode = "READ_ONLY"
@@ -11,7 +11,7 @@ import (
 type GCMetrics interface {
 	AddRunDuration(shardID string, d time.Duration, success bool)
 	AddDeletedCount(shardID string, deleted, failed uint64)
-	AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string)
+	AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool)
 	AddInhumedObjectCount(shardID string, count uint64, objectType string)
 }

@@ -71,11 +71,10 @@ func (m *gcMetrics) AddDeletedCount(shardID string, deleted, failed uint64) {
 	}).Add(float64(failed))
 }
 
-func (m *gcMetrics) AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool, objectType string) {
+func (m *gcMetrics) AddExpiredObjectCollectionDuration(shardID string, d time.Duration, success bool) {
 	m.expCollectDuration.With(prometheus.Labels{
 		shardIDLabel: shardID,
 		successLabel: strconv.FormatBool(success),
-		objectTypeLabel: objectType,
 	}).Add(d.Seconds())
 }
@@ -26,7 +26,6 @@ type NodeMetrics struct {
 	morphCache *morphCacheMetrics
 	log logger.LogMetrics
 	multinet *multinetMetrics
-	qos *QoSMetrics
 	// nolint: unused
 	appInfo *ApplicationInfo
 }

@@ -56,7 +55,6 @@ func NewNodeMetrics() *NodeMetrics {
 		log: logger.NewLogMetrics(namespace),
 		appInfo: NewApplicationInfo(misc.Version),
 		multinet: newMultinetMetrics(namespace),
-		qos: newQoSMetrics(),
 	}
 }

@@ -128,7 +126,3 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
 func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
 	return m.multinet
 }
-
-func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
-	return m.qos
-}
@@ -9,14 +9,13 @@ import (
 )
 
 type ObjectServiceMetrics interface {
-	AddRequestDuration(method string, d time.Duration, success bool, ioTag string)
+	AddRequestDuration(method string, d time.Duration, success bool)
 	AddPayloadSize(method string, size int)
 }
 
 type objectServiceMetrics struct {
 	methodDuration *prometheus.HistogramVec
 	payloadCounter *prometheus.CounterVec
-	ioTagOpsCounter *prometheus.CounterVec
 }
 
 func newObjectServiceMetrics() *objectServiceMetrics {

@@ -33,24 +32,14 @@ func newObjectServiceMetrics() *objectServiceMetrics {
 			Name: "request_payload_bytes",
 			Help: "Object Service request payload",
 		}, []string{methodLabel}),
-		ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
-			Namespace: namespace,
-			Subsystem: objectSubsystem,
-			Name: "requests_total",
-			Help: "Count of requests for each IO tag",
-		}, []string{methodLabel, ioTagLabel}),
 	}
 }
 
-func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) {
+func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) {
 	m.methodDuration.With(prometheus.Labels{
 		methodLabel: method,
 		successLabel: strconv.FormatBool(success),
 	}).Observe(d.Seconds())
-	m.ioTagOpsCounter.With(prometheus.Labels{
-		ioTagLabel: ioTag,
-		methodLabel: method,
-	}).Inc()
 }
 
 func (m *objectServiceMetrics) AddPayloadSize(method string, size int) {
@@ -1,52 +0,0 @@
-package metrics
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-type QoSMetrics struct {
-	opsCounter *prometheus.GaugeVec
-}
-
-func newQoSMetrics() *QoSMetrics {
-	return &QoSMetrics{
-		opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
-			Namespace: namespace,
-			Subsystem: qosSubsystem,
-			Name: "operations_total",
-			Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
-		}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
-	}
-}
-
-func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
-	m.opsCounter.With(prometheus.Labels{
-		shardIDLabel: shardID,
-		operationLabel: operation,
-		ioTagLabel: tag,
-		typeLabel: "pending",
-	}).Set(float64(pending))
-	m.opsCounter.With(prometheus.Labels{
-		shardIDLabel: shardID,
-		operationLabel: operation,
-		ioTagLabel: tag,
-		typeLabel: "in_progress",
-	}).Set(float64(inProgress))
-	m.opsCounter.With(prometheus.Labels{
-		shardIDLabel: shardID,
-		operationLabel: operation,
-		ioTagLabel: tag,
-		typeLabel: "completed",
-	}).Set(float64(completed))
-	m.opsCounter.With(prometheus.Labels{
-		shardIDLabel: shardID,
-		operationLabel: operation,
-		ioTagLabel: tag,
-		typeLabel: "resource_exhausted",
-	}).Set(float64(resourceExhausted))
-}
-
-func (m *QoSMetrics) Close(shardID string) {
-	m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
-}
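For reference, hypothetical usage of the removed `QoSMetrics` helper shown above (the constructor is package-internal in the real code, and the gauge values here are arbitrary):

```go
m := newQoSMetrics()
// Publish per-shard queue state for GET requests tagged "client":
// 10 pending, 2 in progress, 1000 completed, 3 rejected as resource-exhausted.
m.SetOperationTagCounters("shard-1", "get", "client", 10, 2, 1000, 3)
defer m.Close("shard-1") // drop all series for the shard on shutdown
```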
@ -12,14 +12,12 @@ type TreeMetricsRegister interface {
|
||||||
AddReplicateTaskDuration(time.Duration, bool)
|
AddReplicateTaskDuration(time.Duration, bool)
|
||||||
AddReplicateWaitDuration(time.Duration, bool)
|
AddReplicateWaitDuration(time.Duration, bool)
|
||||||
AddSyncDuration(time.Duration, bool)
|
AddSyncDuration(time.Duration, bool)
|
||||||
AddOperation(string, string)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type treeServiceMetrics struct {
|
type treeServiceMetrics struct {
|
||||||
replicateTaskDuration *prometheus.HistogramVec
|
replicateTaskDuration *prometheus.HistogramVec
|
||||||
replicateWaitDuration *prometheus.HistogramVec
|
replicateWaitDuration *prometheus.HistogramVec
|
||||||
syncOpDuration *prometheus.HistogramVec
|
syncOpDuration *prometheus.HistogramVec
|
||||||
ioTagOpsCounter *prometheus.CounterVec
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
|
var _ TreeMetricsRegister = (*treeServiceMetrics)(nil)
|
||||||
|
@ -44,12 +42,6 @@ func newTreeServiceMetrics() *treeServiceMetrics {
|
||||||
Name: "sync_duration_seconds",
|
Name: "sync_duration_seconds",
|
||||||
Help: "Duration of synchronization operations",
|
Help: "Duration of synchronization operations",
|
||||||
}, []string{successLabel}),
|
}, []string{successLabel}),
|
||||||
ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: treeServiceSubsystem,
|
|
||||||
Name: "requests_total",
|
|
||||||
Help: "Count of requests for each IO tag",
|
|
||||||
}, []string{methodLabel, ioTagLabel}),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,10 +62,3 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) {
 		successLabel: strconv.FormatBool(success),
 	}).Observe(d.Seconds())
 }
-
-func (m *treeServiceMetrics) AddOperation(op string, ioTag string) {
-	m.ioTagOpsCounter.With(prometheus.Labels{
-		ioTagLabel:  ioTag,
-		methodLabel: op,
-	}).Inc()
-}
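For context on what the deleted counter measured: every tree-service request was meant to bump one (method, IO tag) series, with the tag recovered from the request context. A self-contained sketch of that pattern follows; the namespace, the fallback tag, and the direct registration are assumptions, since the real code went through the project's metrics wrapper.

package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
	"github.com/prometheus/client_golang/prometheus"
)

// ioTagOps mirrors the removed treeServiceMetrics.ioTagOpsCounter.
var ioTagOps = prometheus.NewCounterVec(prometheus.CounterOpts{
	Namespace: "frostfs",
	Subsystem: "treeservice",
	Name:      "requests_total",
	Help:      "Count of requests for each IO tag",
}, []string{"method", "io_tag"})

// addOperation is the shape of the deleted AddOperation call site.
func addOperation(ctx context.Context, method string) {
	tag, ok := tagging.IOTagFromContext(ctx)
	if !ok {
		tag = "client" // assumed fallback, matching what the limiter does for untagged requests
	}
	ioTagOps.With(prometheus.Labels{"method": method, "io_tag": tag}).Inc()
}

func main() {
	prometheus.MustRegister(ioTagOps)
	ctx := tagging.ContextWithIOTag(context.Background(), "background")
	addOperation(ctx, "GetNodeByPath")
}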
@@ -3,9 +3,7 @@ package qos
 import (
 	"context"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
 	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"google.golang.org/grpc"
 )
 
@@ -26,7 +24,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor
 		if err != nil {
 			tag = IOTagClient
 		}
-		if tag.IsLocal() {
+		if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
 			tag = IOTagInternal
 		}
 		ctx = tagging.ContextWithIOTag(ctx, tag.String())
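Both client interceptors trade a call to tag.IsLocal() for an explicit three-way comparison. The helper's definition lives outside this diff; judging from the replacement, it presumably read roughly like the following (a reading aid, not code from the repo):

// IsLocal reports whether the tag marks node-local background work.
func (t IOTag) IsLocal() bool {
	return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache
}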
@@ -44,43 +42,10 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor
 		if err != nil {
 			tag = IOTagClient
 		}
-		if tag.IsLocal() {
+		if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
 			tag = IOTagInternal
 		}
 		ctx = tagging.ContextWithIOTag(ctx, tag.String())
 		return streamer(ctx, desc, cc, method, opts...)
 	}
 }
-
-func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor {
-	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
-		if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() {
-			return handler(ctx, req)
-		}
-
-		release, ok := getLimiter().Acquire(info.FullMethod)
-		if !ok {
-			return nil, new(apistatus.ResourceExhausted)
-		}
-		defer release()
-
-		return handler(ctx, req)
-	}
-}
-
-//nolint:contextcheck (grpc.ServerStream manages the context itself)
-func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor {
-	return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() {
-			return handler(srv, ss)
-		}
-
-		release, ok := getLimiter().Acquire(info.FullMethod)
-		if !ok {
-			return new(apistatus.ResourceExhausted)
-		}
-		defer release()
-
-		return handler(srv, ss)
-	}
-}
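The two deleted server-side constructors were the bridge between gRPC and the limiter: requests tagged critical bypass the limit, everything else must win a slot or fail with ResourceExhausted. Below is a sketch of how a server would have consumed them, assuming the caller keeps the active limiter behind an atomic pointer so it can be swapped on config reload; the holder and function names are illustrative, not code from the repo.

package qos

import (
	"sync/atomic"

	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
	"google.golang.org/grpc"
)

// newLimitedServer wires the (removed) limiter interceptors into a gRPC
// server. The getter indirection lets the node replace the limiter at
// runtime without rebuilding the server.
func newLimitedServer(holder *atomic.Pointer[limiting.Limiter]) *grpc.Server {
	get := func() limiting.Limiter { return *holder.Load() }
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(NewMaxActiveRPCLimiterUnaryServerInterceptor(get)),
		grpc.ChainStreamInterceptor(NewMaxActiveRPCLimiterStreamServerInterceptor(get)),
	)
}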
@@ -1,236 +0,0 @@
-package qos
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-)
-
-const (
-	defaultIdleTimeout time.Duration = 0
-	defaultShare       float64       = 1.0
-	minusOne                         = ^uint64(0)
-
-	defaultMetricsCollectTimeout = 5 * time.Second
-)
-
-type ReleaseFunc scheduling.ReleaseFunc
-
-type Limiter interface {
-	ReadRequest(context.Context) (ReleaseFunc, error)
-	WriteRequest(context.Context) (ReleaseFunc, error)
-	SetParentID(string)
-	SetMetrics(Metrics)
-	Close()
-}
-
-type scheduler interface {
-	RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error)
-	Close()
-}
-
-func NewLimiter(c *limits.Config) (Limiter, error) {
-	if err := validateConfig(c); err != nil {
-		return nil, err
-	}
-	readScheduler, err := createScheduler(c.Read())
-	if err != nil {
-		return nil, fmt.Errorf("create read scheduler: %w", err)
-	}
-	writeScheduler, err := createScheduler(c.Write())
-	if err != nil {
-		return nil, fmt.Errorf("create write scheduler: %w", err)
-	}
-	l := &mClockLimiter{
-		readScheduler:  readScheduler,
-		writeScheduler: writeScheduler,
-		closeCh:        make(chan struct{}),
-		wg:             &sync.WaitGroup{},
-		readStats:      createStats(),
-		writeStats:     createStats(),
-	}
-	l.shardID.Store(&shardID{})
-	l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}})
-	l.startMetricsCollect()
-	return l, nil
-}
-
-func createScheduler(config limits.OpConfig) (scheduler, error) {
-	if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit {
-		return newSemaphoreScheduler(config.MaxRunningOps), nil
-	}
-	return scheduling.NewMClock(
-		uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps),
-		converToSchedulingTags(config.Tags), config.IdleTimeout)
-}
-
-func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
-	result := make(map[string]scheduling.TagInfo)
-	for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
-		result[tag.String()] = scheduling.TagInfo{
-			Share: defaultShare,
-		}
-	}
-	for _, l := range limits {
-		v := result[l.Tag]
-		if l.Weight != nil && *l.Weight != 0 {
-			v.Share = *l.Weight
-		}
-		if l.LimitOps != nil && *l.LimitOps != 0 {
-			v.LimitIOPS = l.LimitOps
-		}
-		if l.ReservedOps != nil && *l.ReservedOps != 0 {
-			v.ReservedIOPS = l.ReservedOps
-		}
-		result[l.Tag] = v
-	}
-	return result
-}
-
-var (
-	_           Limiter     = (*noopLimiter)(nil)
-	releaseStub ReleaseFunc = func() {}
-	noopLimiterInstance     = &noopLimiter{}
-)
-
-func NewNoopLimiter() Limiter {
-	return noopLimiterInstance
-}
-
-type noopLimiter struct{}
-
-func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
-	return releaseStub, nil
-}
-
-func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
-	return releaseStub, nil
-}
-
-func (n *noopLimiter) SetParentID(string) {}
-
-func (n *noopLimiter) Close() {}
-
-func (n *noopLimiter) SetMetrics(Metrics) {}
-
-var _ Limiter = (*mClockLimiter)(nil)
-
-type shardID struct {
-	id string
-}
-
-type mClockLimiter struct {
-	readScheduler  scheduler
-	writeScheduler scheduler
-
-	readStats  map[string]*stat
-	writeStats map[string]*stat
-
-	shardID atomic.Pointer[shardID]
-	metrics atomic.Pointer[metricsHolder]
-	closeCh chan struct{}
-	wg      *sync.WaitGroup
-}
-
-func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
-	return requestArrival(ctx, n.readScheduler, n.readStats)
-}
-
-func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
-	return requestArrival(ctx, n.writeScheduler, n.writeStats)
-}
-
-func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) {
-	tag, ok := tagging.IOTagFromContext(ctx)
-	if !ok {
-		tag = IOTagClient.String()
-	}
-	stat := getStat(tag, stats)
-	stat.pending.Add(1)
-	if tag == IOTagCritical.String() {
-		stat.inProgress.Add(1)
-		return func() {
-			stat.completed.Add(1)
-		}, nil
-	}
-	rel, err := s.RequestArrival(ctx, tag)
-	stat.inProgress.Add(1)
-	if err != nil {
-		if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
-			errors.Is(err, errSemaphoreLimitExceeded) {
-			stat.resourceExhausted.Add(1)
-			return nil, &apistatus.ResourceExhausted{}
-		}
-		stat.completed.Add(1)
-		return nil, err
-	}
-	return func() {
-		rel()
-		stat.completed.Add(1)
-	}, nil
-}
-
-func (n *mClockLimiter) Close() {
-	n.readScheduler.Close()
-	n.writeScheduler.Close()
-	close(n.closeCh)
-	n.wg.Wait()
-	n.metrics.Load().metrics.Close(n.shardID.Load().id)
-}
-
-func (n *mClockLimiter) SetParentID(parentID string) {
-	n.shardID.Store(&shardID{id: parentID})
-}
-
-func (n *mClockLimiter) SetMetrics(m Metrics) {
-	n.metrics.Store(&metricsHolder{metrics: m})
-}
-
-func (n *mClockLimiter) startMetricsCollect() {
-	n.wg.Add(1)
-	go func() {
-		defer n.wg.Done()
-
-		ticker := time.NewTicker(defaultMetricsCollectTimeout)
-		defer ticker.Stop()
-		for {
-			select {
-			case <-n.closeCh:
-				return
-			case <-ticker.C:
-				shardID := n.shardID.Load().id
-				if shardID == "" {
-					continue
-				}
-				metrics := n.metrics.Load().metrics
-				exportMetrics(metrics, n.readStats, shardID, "read")
-				exportMetrics(metrics, n.writeStats, shardID, "write")
-			}
-		}
-	}()
-}
-
-func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
-	var pending uint64
-	var inProgress uint64
-	var completed uint64
-	var resExh uint64
-	for tag, s := range stats {
-		pending = s.pending.Load()
-		inProgress = s.inProgress.Load()
-		completed = s.completed.Load()
-		resExh = s.resourceExhausted.Load()
-		if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
-			continue
-		}
-		metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
-	}
-}
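The deleted Limiter kept a deliberately small surface: acquire a slot on the read or write path, do the work, release. Critical-tagged requests skip the scheduler entirely but still feed the statistics, which is why requestArrival above special-cases IOTagCritical. Here is a sketch of the call pattern on a shard read path; readPayload and doRead are illustrative names standing in for the real shard code.

// readPayload shows how shard code consumed the limiter.
func readPayload(ctx context.Context, l Limiter) ([]byte, error) {
	release, err := l.ReadRequest(ctx)
	if err != nil {
		// Under pressure the mClock limiter returns *apistatus.ResourceExhausted.
		return nil, err
	}
	defer release() // marks the operation completed in the limiter stats

	return doRead(ctx)
}

func doRead(context.Context) ([]byte, error) { return nil, nil } // stub for illustration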
@@ -1,31 +0,0 @@
-package qos
-
-import "sync/atomic"
-
-type Metrics interface {
-	SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64)
-	Close(shardID string)
-}
-
-var _ Metrics = (*noopMetrics)(nil)
-
-type noopMetrics struct{}
-
-func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) {
-}
-
-func (n *noopMetrics) Close(string) {}
-
-// stat presents limiter statistics cumulative counters.
-//
-// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
-type stat struct {
-	completed         atomic.Uint64
-	pending           atomic.Uint64
-	resourceExhausted atomic.Uint64
-	inProgress        atomic.Uint64
-}
-
-type metricsHolder struct {
-	metrics Metrics
-}
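Anything satisfying this two-method interface could consume the limiter's cumulative counters; the real implementation was the Prometheus-backed QoSMetrics removed earlier in this diff. A toy implementation that just logs snapshots makes the contract concrete (it would sit next to the interface and additionally needs the standard "log" import):

// logMetrics is an illustrative Metrics implementation: values arrive as
// cumulative counters per shard/operation/tag, and Close signals that the
// shard's series can be dropped.
type logMetrics struct{}

func (logMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) {
	log.Printf("shard=%s op=%s tag=%s pending=%d in_progress=%d completed=%d resource_exhausted=%d",
		shardID, operation, tag, pending, inProgress, completed, resourceExhausted)
}

func (logMetrics) Close(shardID string) { log.Printf("shard=%s: metrics released", shardID) }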
|
@ -1,39 +0,0 @@
|
||||||
package qos
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ scheduler = (*semaphore)(nil)
|
|
||||||
errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded")
|
|
||||||
)
|
|
||||||
|
|
||||||
type semaphore struct {
|
|
||||||
s *qosSemaphore.Semaphore
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSemaphoreScheduler(size int64) *semaphore {
|
|
||||||
return &semaphore{
|
|
||||||
s: qosSemaphore.NewSemaphore(size),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *semaphore) Close() {}
|
|
||||||
|
|
||||||
func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, ctx.Err()
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.s.Acquire() {
|
|
||||||
return s.s.Release, nil
|
|
||||||
}
|
|
||||||
return nil, errSemaphoreLimitExceeded
|
|
||||||
}
|
|
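Unlike the mClock scheduler, this semaphore never queues: Acquire is non-blocking, so exhaustion surfaces immediately as errSemaphoreLimitExceeded, which requestArrival translated into ResourceExhausted for the caller. The tag argument exists only to satisfy the scheduler interface. A short usage sketch; trySchedule and the hard-coded tag are illustrative:

// trySchedule exercises the semaphore-backed scheduler.
func trySchedule(ctx context.Context, s scheduler) error {
	release, err := s.RequestArrival(ctx, "client") // tag is ignored by the semaphore
	if err != nil {
		return err // either ctx.Err() or errSemaphoreLimitExceeded
	}
	defer release()
	// ... perform the rate-limited operation ...
	return nil
}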
Some files were not shown because too many files have changed in this diff.