forked from TrueCloudLab/frostfs-node
Compare commits
116 commits
bugfix/fix ... master
SHA1 | Author | Date | |
---|---|---|---|
48930ec452 | |||
f37babdc54 | |||
fd37cea443 | |||
e80632884a | |||
5aaa3df533 | |||
3be33b7117 | |||
29b4fbe451 | |||
0d36e93169 | |||
8e87cbee17 | |||
12fc7850dd | |||
dfe2f9956a | |||
e06ecacf57 | |||
64c1392513 | |||
dcfd895449 | |||
6730e27ae7 | |||
f7779adf71 | |||
f93b96c601 | |||
aed84b567c | |||
fe29ed043a | |||
4f9d237042 | |||
dfdeedfc6f | |||
2394ae6ce0 | |||
c274bbeb7c | |||
faec499b38 | |||
46fd5e17b2 | |||
17cba3387e | |||
766d9ec46b | |||
0e1b01b15f | |||
4c03561aa2 | |||
f4696e8964 | |||
b0ef737a74 | |||
6f7b6b65f3 | |||
923f0acf8f | |||
9b5c1da40f | |||
b4b053cecd | |||
0c5d74729c | |||
c4f941a5f5 | |||
d933609084 | |||
3d771aa21c | |||
52367dc9b2 | |||
979d4bb2ae | |||
fbc623f34e | |||
5350632e01 | |||
2938498b52 | |||
272128e61f | |||
50dccff7c1 | |||
634de97509 | |||
2a6cdbdb72 | |||
11493d587b | |||
b924ecb850 | |||
e142d25fac | |||
bd1c18e117 | |||
b27f7d1d17 | |||
3cd8080232 | |||
a11b54ca15 | |||
b112a92408 | |||
19ca907223 | |||
f62d81e26a | |||
27899598dc | |||
bc6cc9ae2a | |||
6e1576cfdb | |||
a5bae6c5af | |||
5a13830a94 | |||
dcb2b23a7d | |||
115aae7c34 | |||
12a0537a7a | |||
30d4692c3e | |||
2254c8aff5 | |||
d432bebef4 | |||
d144abc977 | |||
a2053870e2 | |||
d00c606fee | |||
60446bb668 | |||
bd8ab2d84a | |||
bce2f7bef0 | |||
c2c05e2228 | |||
0a38571a10 | |||
632bd8e38d | |||
3bbee1b554 | |||
9358938222 | |||
5470b205fd | |||
163e2e9f83 | |||
0c664fa804 | |||
0a9d139e20 | |||
3bb1fb744a | |||
ccdd6cb767 | |||
73e35bc885 | |||
eed0824590 | |||
a4da1da767 | |||
30099194ba | |||
e7e91ef634 | |||
4919b6a206 | |||
d951289131 | |||
016f2e11e3 | |||
9aa486c9d8 | |||
af76350bfb | |||
3fa5c22ddf | |||
5385f9994f | |||
eea46a599d | |||
049a650b89 | |||
3f4717a37f | |||
60cea8c714 | |||
7df2912a83 | |||
affab25512 | |||
45b7796151 | |||
e8801dbf49 | |||
eb9df85b98 | |||
21bed3362c | |||
af5b3575d0 | |||
a49f0717b3 | |||
a7ac30da9c | |||
39f549a7ab | |||
760b6a44ea | |||
a11b2d27e4 | |||
a405fb1f39 | |||
a7319bc979 |
191 changed files with 4314 additions and 4330 deletions
81 .ci/Jenkinsfile vendored Normal file
|
@ -0,0 +1,81 @@
|
|||
def golang = ['1.23', '1.24']
|
||||
def golangDefault = "golang:${golang.last()}"
|
||||
|
||||
async {
|
||||
|
||||
for (version in golang) {
|
||||
def go = version
|
||||
|
||||
task("test/go${go}") {
|
||||
container("golang:${go}") {
|
||||
sh 'make test'
|
||||
}
|
||||
}
|
||||
|
||||
task("build/go${go}") {
|
||||
container("golang:${go}") {
|
||||
for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
|
||||
sh """
|
||||
make bin/frostfs-${app}
|
||||
bin/frostfs-${app} --version
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task('test/race') {
|
||||
container(golangDefault) {
|
||||
sh 'make test GOFLAGS="-count=1 -race"'
|
||||
}
|
||||
}
|
||||
|
||||
task('lint') {
|
||||
container(golangDefault) {
|
||||
sh 'make lint-install lint'
|
||||
}
|
||||
}
|
||||
|
||||
task('staticcheck') {
|
||||
container(golangDefault) {
|
||||
sh 'make staticcheck-install staticcheck-run'
|
||||
}
|
||||
}
|
||||
|
||||
task('gopls') {
|
||||
container(golangDefault) {
|
||||
sh 'make gopls-install gopls-run'
|
||||
}
|
||||
}
|
||||
|
||||
task('gofumpt') {
|
||||
container(golangDefault) {
|
||||
sh '''
|
||||
make fumpt-install
|
||||
make fumpt
|
||||
git diff --exit-code --quiet
|
||||
'''
|
||||
}
|
||||
}
|
||||
|
||||
task('vulncheck') {
|
||||
container(golangDefault) {
|
||||
sh '''
|
||||
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
govulncheck ./...
|
||||
'''
|
||||
}
|
||||
}
|
||||
|
||||
task('pre-commit') {
|
||||
dockerfile("""
|
||||
FROM ${golangDefault}
|
||||
RUN apt update && \
|
||||
apt install -y --no-install-recommends pre-commit
|
||||
""") {
|
||||
withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
|
||||
sh 'pre-commit run --color=always --hook-stage=manual --all-files'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
182 .golangci.yml
|
@ -1,101 +1,103 @@
|
|||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
|
||||
# options for analysis running
|
||||
version: "2"
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
timeout: 20m
|
||||
|
||||
# include test files or not, default is true
|
||||
tests: false
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
formats:
|
||||
- format: tab
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
exhaustive:
|
||||
# indicates that switch statements are to be considered exhaustive if a
|
||||
# 'default' case is present, even if all enum members aren't listed in the
|
||||
# switch
|
||||
default-signifies-exhaustive: true
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
custom-order: true
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: false
|
||||
staticcheck:
|
||||
checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
|
||||
funlen:
|
||||
lines: 80 # default 60
|
||||
statements: 60 # default 40
|
||||
gocognit:
|
||||
min-complexity: 40 # default 30
|
||||
importas:
|
||||
no-unaliased: true
|
||||
no-extra-aliases: false
|
||||
alias:
|
||||
pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
|
||||
alias: objectSDK
|
||||
unused:
|
||||
field-writes-are-uses: false
|
||||
exported-fields-are-used: false
|
||||
local-variables-are-used: false
|
||||
custom:
|
||||
truecloudlab-linters:
|
||||
path: bin/linters/external_linters.so
|
||||
original-url: git.frostfs.info/TrueCloudLab/linters.git
|
||||
settings:
|
||||
noliteral:
|
||||
target-methods : ["reportFlushError", "reportError"]
|
||||
disable-packages: ["codes", "err", "res","exec"]
|
||||
constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
|
||||
tab:
|
||||
path: stdout
|
||||
colors: false
|
||||
linters:
|
||||
default: none
|
||||
enable:
|
||||
# mandatory linters
|
||||
- govet
|
||||
- revive
|
||||
|
||||
# some default golangci-lint linters
|
||||
- errcheck
|
||||
- gosimple
|
||||
- godot
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- typecheck
|
||||
- unused
|
||||
|
||||
# extra linters
|
||||
- bidichk
|
||||
- durationcheck
|
||||
- exhaustive
|
||||
- containedctx
|
||||
- contextcheck
|
||||
- copyloopvar
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- exhaustive
|
||||
- funlen
|
||||
- gocognit
|
||||
- godot
|
||||
- importas
|
||||
- ineffassign
|
||||
- intrange
|
||||
- misspell
|
||||
- perfsprint
|
||||
- predeclared
|
||||
- protogetter
|
||||
- reassign
|
||||
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- truecloudlab-linters
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- usetesting
|
||||
- whitespace
|
||||
settings:
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: true
|
||||
funlen:
|
||||
lines: 80
|
||||
statements: 60
|
||||
gocognit:
|
||||
min-complexity: 40
|
||||
importas:
|
||||
alias:
|
||||
- pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
|
||||
alias: objectSDK
|
||||
no-unaliased: true
|
||||
no-extra-aliases: false
|
||||
staticcheck:
|
||||
checks:
|
||||
- all
|
||||
- -QF1002
|
||||
unused:
|
||||
field-writes-are-uses: false
|
||||
exported-fields-are-used: false
|
||||
local-variables-are-used: false
|
||||
custom:
|
||||
truecloudlab-linters:
|
||||
path: bin/linters/external_linters.so
|
||||
original-url: git.frostfs.info/TrueCloudLab/linters.git
|
||||
settings:
|
||||
noliteral:
|
||||
constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
|
||||
disable-packages:
|
||||
- codes
|
||||
- err
|
||||
- res
|
||||
- exec
|
||||
target-methods:
|
||||
- reportFlushError
|
||||
- reportError
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- goimports
|
||||
- misspell
|
||||
- predeclared
|
||||
- reassign
|
||||
- whitespace
|
||||
- containedctx
|
||||
- funlen
|
||||
- gocognit
|
||||
- contextcheck
|
||||
- importas
|
||||
- truecloudlab-linters
|
||||
- perfsprint
|
||||
- testifylint
|
||||
- protogetter
|
||||
- intrange
|
||||
- tenv
|
||||
- unconvert
|
||||
- unparam
|
||||
disable-all: true
|
||||
fast: false
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
custom-order: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
|
42 Makefile
|
@ -1,5 +1,6 @@
|
|||
#!/usr/bin/make -f
|
||||
SHELL = bash
|
||||
.SHELLFLAGS = -euo pipefail -c
|
||||
|
||||
REPO ?= $(shell go list -m)
|
||||
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
||||
|
@ -8,8 +9,8 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
|
|||
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
||||
|
||||
GO_VERSION ?= 1.23
|
||||
LINT_VERSION ?= 1.62.2
|
||||
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
|
||||
LINT_VERSION ?= 2.0.2
|
||||
TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
|
||||
PROTOC_VERSION ?= 25.0
|
||||
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
|
||||
PROTOC_OS_VERSION=osx-x86_64
|
||||
|
@ -115,7 +116,7 @@ protoc:
|
|||
# Install protoc
|
||||
protoc-install:
|
||||
@rm -rf $(PROTOBUF_DIR)
|
||||
@mkdir $(PROTOBUF_DIR)
|
||||
@mkdir -p $(PROTOBUF_DIR)
|
||||
@echo "⇒ Installing protoc... "
|
||||
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
|
||||
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
|
||||
|
@ -169,7 +170,7 @@ imports:
|
|||
# Install gofumpt
|
||||
fumpt-install:
|
||||
@rm -rf $(GOFUMPT_DIR)
|
||||
@mkdir $(GOFUMPT_DIR)
|
||||
@mkdir -p $(GOFUMPT_DIR)
|
||||
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
|
||||
|
||||
# Run gofumpt
|
||||
|
@ -186,21 +187,44 @@ test:
|
|||
@echo "⇒ Running go test"
|
||||
@GOFLAGS="$(GOFLAGS)" go test ./...
|
||||
|
||||
# Install Gerrit commit-msg hook
|
||||
review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
|
||||
review-install:
|
||||
@git config remote.review.url \
|
||||
|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
|
||||
@mkdir -p $(GIT_HOOK_DIR)/
|
||||
@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
|
||||
@chmod +x $(GIT_HOOK_DIR)/commit-msg
|
||||
@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
|
||||
@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
|
||||
|
||||
# Create a PR in Gerrit
|
||||
review: BRANCH ?= master
|
||||
review:
|
||||
@git push review HEAD:refs/for/$(BRANCH) \
|
||||
--push-option r=e.stratonikov@yadro.com \
|
||||
--push-option r=d.stepanov@yadro.com \
|
||||
--push-option r=an.nikiforov@yadro.com \
|
||||
--push-option r=a.arifullin@yadro.com \
|
||||
--push-option r=ekaterina.lebedeva@yadro.com \
|
||||
--push-option r=a.savchuk@yadro.com \
|
||||
--push-option r=a.chuprov@yadro.com
|
||||
|
||||
# Run pre-commit
|
||||
pre-commit-run:
|
||||
@pre-commit run -a --hook-stage manual
|
||||
|
||||
# Install linters
|
||||
lint-install:
|
||||
lint-install: $(BIN)
|
||||
@rm -rf $(OUTPUT_LINT_DIR)
|
||||
@mkdir $(OUTPUT_LINT_DIR)
|
||||
@mkdir -p $(OUTPUT_LINT_DIR)
|
||||
@mkdir -p $(TMP_DIR)
|
||||
@rm -rf $(TMP_DIR)/linters
|
||||
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
|
||||
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
|
||||
@rm -rf $(TMP_DIR)/linters
|
||||
@rmdir $(TMP_DIR) 2>/dev/null || true
|
||||
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
|
||||
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
|
||||
|
||||
# Run linters
|
||||
lint:
|
||||
|
@ -212,7 +236,7 @@ lint:
|
|||
# Install staticcheck
|
||||
staticcheck-install:
|
||||
@rm -rf $(STATICCHECK_DIR)
|
||||
@mkdir $(STATICCHECK_DIR)
|
||||
@mkdir -p $(STATICCHECK_DIR)
|
||||
@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
|
||||
|
||||
# Run staticcheck
|
||||
|
@ -225,7 +249,7 @@ staticcheck-run:
|
|||
# Install gopls
|
||||
gopls-install:
|
||||
@rm -rf $(GOPLS_DIR)
|
||||
@mkdir $(GOPLS_DIR)
|
||||
@mkdir -p $(GOPLS_DIR)
|
||||
@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
|
||||
|
||||
# Run gopls
|
||||
|
|
15 cmd/frostfs-adm/internal/modules/maintenance/root.go Normal file
|
@ -0,0 +1,15 @@
|
|||
package maintenance
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "maintenance",
|
||||
Short: "Section for maintenance commands",
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(zombie.Cmd)
|
||||
}
|
70 cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go Normal file
|
@ -0,0 +1,70 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||
"github.com/nspcc-dev/neo-go/cli/input"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
|
||||
keyDesc := viper.GetString(walletFlag)
|
||||
if keyDesc == "" {
|
||||
return &nodeconfig.Key(appCfg).PrivateKey
|
||||
}
|
||||
data, err := os.ReadFile(keyDesc)
|
||||
commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
|
||||
|
||||
priv, err := keys.NewPrivateKeyFromBytes(data)
|
||||
if err != nil {
|
||||
w, err := wallet.NewWalletFromFile(keyDesc)
|
||||
commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
|
||||
return fromWallet(cmd, w, viper.GetString(addressFlag))
|
||||
}
|
||||
return &priv.PrivateKey
|
||||
}
|
||||
|
||||
func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
|
||||
var (
|
||||
addr util.Uint160
|
||||
err error
|
||||
)
|
||||
|
||||
if addrStr == "" {
|
||||
addr = w.GetChangeAddress()
|
||||
} else {
|
||||
addr, err = flags.ParseAddress(addrStr)
|
||||
commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
|
||||
}
|
||||
|
||||
acc := w.GetAccount(addr)
|
||||
if acc == nil {
|
||||
commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
|
||||
}
|
||||
|
||||
pass, err := getPassword()
|
||||
commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
|
||||
|
||||
commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
|
||||
|
||||
return &acc.PrivateKey().PrivateKey
|
||||
}
|
||||
|
||||
func getPassword() (string, error) {
|
||||
// this check allows empty passwords
|
||||
if viper.IsSet("password") {
|
||||
return viper.GetString("password"), nil
|
||||
}
|
||||
|
||||
return input.ReadPassword("Enter password > ")
|
||||
}
|
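For context, getPrivateKey above falls back from the node config key to a user-supplied file that may be either a raw binary key or a NEP-6 wallet. Below is a standalone sketch of that resolution order, assuming a simplified password flow and the wallet's default (change) address; it is not part of the diff.

```go
package main

import (
	"crypto/ecdsa"
	"fmt"
	"os"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
)

// loadKey mirrors the fallback order used by the command: raw binary key
// first, then a NEP-6 wallet account decrypted with the given password.
func loadKey(path, password string) (*ecdsa.PrivateKey, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read key file: %w", err)
	}
	// A binary private key is accepted as-is.
	if priv, err := keys.NewPrivateKeyFromBytes(data); err == nil {
		return &priv.PrivateKey, nil
	}
	// Otherwise the file must be a NEP-6 wallet; take its default account.
	w, err := wallet.NewWalletFromFile(path)
	if err != nil {
		return nil, fmt.Errorf("neither binary key nor wallet: %w", err)
	}
	acc := w.GetAccount(w.GetChangeAddress())
	if acc == nil {
		return nil, fmt.Errorf("no account in wallet %s", path)
	}
	if err := acc.Decrypt(password, keys.NEP2ScryptParams()); err != nil {
		return nil, fmt.Errorf("decrypt account: %w", err)
	}
	return &acc.PrivateKey().PrivateKey, nil
}

func main() {
	// Hypothetical paths and password, for illustration only.
	if _, err := loadKey("wallet.json", "pass"); err != nil {
		fmt.Println(err)
	}
}
```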
31 cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go Normal file
|
@ -0,0 +1,31 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func list(cmd *cobra.Command, _ []string) {
|
||||
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
|
||||
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
|
||||
appCfg := config.New(configFile, configDir, config.EnvPrefix)
|
||||
storageEngine := newEngine(cmd, appCfg)
|
||||
q := createQuarantine(cmd, storageEngine.DumpInfo())
|
||||
var containerID *cid.ID
|
||||
if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
|
||||
containerID = &cid.ID{}
|
||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
|
||||
}
|
||||
|
||||
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
|
||||
if containerID != nil && a.Container() != *containerID {
|
||||
return nil
|
||||
}
|
||||
cmd.Println(a.EncodeToString())
|
||||
return nil
|
||||
}))
|
||||
}
|
46 cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go Normal file
|
@ -0,0 +1,46 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
|
||||
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
|
||||
netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
|
||||
addresses := morphconfig.RPCEndpoint(appCfg)
|
||||
if len(addresses) == 0 {
|
||||
commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
|
||||
}
|
||||
key := nodeconfig.Key(appCfg)
|
||||
cli, err := client.New(cmd.Context(),
|
||||
key,
|
||||
client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
|
||||
client.WithEndpoints(addresses...),
|
||||
client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
|
||||
)
|
||||
commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
|
||||
return cli
|
||||
}
|
||||
|
||||
func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
|
||||
hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
|
||||
commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
|
||||
cc, err := cntClient.NewFromMorph(morph, hs, 0)
|
||||
commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
|
||||
return cc
|
||||
}
|
||||
|
||||
func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
|
||||
hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
|
||||
commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
|
||||
cli, err := netmapClient.NewFromMorph(morph, hs, 0)
|
||||
commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
|
||||
return cli
|
||||
}
|
|
@ -0,0 +1,154 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type quarantine struct {
|
||||
// mtx protects current field.
|
||||
mtx sync.Mutex
|
||||
current int
|
||||
trees []*fstree.FSTree
|
||||
}
|
||||
|
||||
func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
|
||||
var paths []string
|
||||
for _, sh := range engineInfo.Shards {
|
||||
var storagePaths []string
|
||||
for _, st := range sh.BlobStorInfo.SubStorages {
|
||||
storagePaths = append(storagePaths, st.Path)
|
||||
}
|
||||
if len(storagePaths) == 0 {
|
||||
continue
|
||||
}
|
||||
paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
|
||||
}
|
||||
q, err := newQuarantine(paths)
|
||||
commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
|
||||
return q
|
||||
}
|
||||
|
||||
func commonPath(paths []string) string {
|
||||
if len(paths) == 0 {
|
||||
return ""
|
||||
}
|
||||
if len(paths) == 1 {
|
||||
return paths[0]
|
||||
}
|
||||
minLen := math.MaxInt
|
||||
for _, p := range paths {
|
||||
if len(p) < minLen {
|
||||
minLen = len(p)
|
||||
}
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
for i := range minLen {
|
||||
for _, path := range paths[1:] {
|
||||
if paths[0][i] != path[i] {
|
||||
return sb.String()
|
||||
}
|
||||
}
|
||||
sb.WriteByte(paths[0][i])
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func newQuarantine(paths []string) (*quarantine, error) {
|
||||
var q quarantine
|
||||
for i := range paths {
|
||||
f := fstree.New(
|
||||
fstree.WithDepth(1),
|
||||
fstree.WithDirNameLen(1),
|
||||
fstree.WithPath(paths[i]),
|
||||
fstree.WithPerm(os.ModePerm),
|
||||
)
|
||||
if err := f.Open(mode.ComponentReadWrite); err != nil {
|
||||
return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
|
||||
}
|
||||
if err := f.Init(); err != nil {
|
||||
return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
|
||||
}
|
||||
q.trees = append(q.trees, f)
|
||||
}
|
||||
return &q, nil
|
||||
}
|
||||
|
||||
func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
|
||||
for i := range q.trees {
|
||||
res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return res.Object, nil
|
||||
}
|
||||
return nil, &apistatus.ObjectNotFound{}
|
||||
}
|
||||
|
||||
func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
|
||||
for i := range q.trees {
|
||||
_, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return &apistatus.ObjectNotFound{}
|
||||
}
|
||||
|
||||
func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
|
||||
data, err := obj.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var prm common.PutPrm
|
||||
prm.Address = objectcore.AddressOf(obj)
|
||||
prm.Object = obj
|
||||
prm.RawData = data
|
||||
|
||||
q.mtx.Lock()
|
||||
current := q.current
|
||||
q.current = (q.current + 1) % len(q.trees)
|
||||
q.mtx.Unlock()
|
||||
|
||||
_, err = q.trees[current].Put(ctx, prm)
|
||||
return err
|
||||
}
|
||||
|
||||
func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
|
||||
var prm common.IteratePrm
|
||||
prm.Handler = func(elem common.IterationElement) error {
|
||||
return f(elem.Address)
|
||||
}
|
||||
for i := range q.trees {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
_, err := q.trees[i].Iterate(ctx, prm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
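createQuarantine above derives one quarantine directory per shard by taking the byte-wise longest common prefix of the shard's sub-storage paths and appending "quarantine"; objects are then distributed round-robin across those fstrees. A small standalone sketch of the prefix computation (the helper name and paths below are made up):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// longestCommonPrefix returns the byte-wise common prefix of all paths,
// mirroring the commonPath helper from the quarantine code.
func longestCommonPrefix(paths []string) string {
	if len(paths) == 0 {
		return ""
	}
	prefix := paths[0]
	for _, p := range paths[1:] {
		i := 0
		for i < len(prefix) && i < len(p) && prefix[i] == p[i] {
			i++
		}
		prefix = prefix[:i]
	}
	return prefix
}

func main() {
	// Hypothetical sub-storage paths of a single shard.
	paths := []string{
		"/srv/frostfs/shard0/blobovnicza",
		"/srv/frostfs/shard0/fstree",
	}
	fmt.Println(filepath.Join(longestCommonPrefix(paths), "quarantine"))
	// Output: /srv/frostfs/shard0/quarantine
}
```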
@ -0,0 +1,55 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func remove(cmd *cobra.Command, _ []string) {
|
||||
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
|
||||
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
|
||||
appCfg := config.New(configFile, configDir, config.EnvPrefix)
|
||||
storageEngine := newEngine(cmd, appCfg)
|
||||
q := createQuarantine(cmd, storageEngine.DumpInfo())
|
||||
|
||||
var containerID cid.ID
|
||||
cidStr, _ := cmd.Flags().GetString(cidFlag)
|
||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
|
||||
|
||||
var objectID *oid.ID
|
||||
oidStr, _ := cmd.Flags().GetString(oidFlag)
|
||||
if oidStr != "" {
|
||||
objectID = &oid.ID{}
|
||||
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
|
||||
}
|
||||
|
||||
if objectID != nil {
|
||||
var addr oid.Address
|
||||
addr.SetContainer(containerID)
|
||||
addr.SetObject(*objectID)
|
||||
removeObject(cmd, q, addr)
|
||||
} else {
|
||||
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
|
||||
if addr.Container() != containerID {
|
||||
return nil
|
||||
}
|
||||
removeObject(cmd, q, addr)
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
|
||||
err := q.Delete(cmd.Context(), addr)
|
||||
if errors.Is(err, new(apistatus.ObjectNotFound)) {
|
||||
return
|
||||
}
|
||||
commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
||||
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func restore(cmd *cobra.Command, _ []string) {
|
||||
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
|
||||
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
|
||||
appCfg := config.New(configFile, configDir, config.EnvPrefix)
|
||||
storageEngine := newEngine(cmd, appCfg)
|
||||
q := createQuarantine(cmd, storageEngine.DumpInfo())
|
||||
morphClient := createMorphClient(cmd, appCfg)
|
||||
cnrCli := createContainerClient(cmd, morphClient)
|
||||
|
||||
var containerID cid.ID
|
||||
cidStr, _ := cmd.Flags().GetString(cidFlag)
|
||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
|
||||
|
||||
var objectID *oid.ID
|
||||
oidStr, _ := cmd.Flags().GetString(oidFlag)
|
||||
if oidStr != "" {
|
||||
objectID = &oid.ID{}
|
||||
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
|
||||
}
|
||||
|
||||
if objectID != nil {
|
||||
var addr oid.Address
|
||||
addr.SetContainer(containerID)
|
||||
addr.SetObject(*objectID)
|
||||
restoreObject(cmd, storageEngine, q, addr, cnrCli)
|
||||
} else {
|
||||
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
|
||||
if addr.Container() != containerID {
|
||||
return nil
|
||||
}
|
||||
restoreObject(cmd, storageEngine, q, addr, cnrCli)
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
|
||||
obj, err := q.Get(cmd.Context(), addr)
|
||||
commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
|
||||
rawCID := make([]byte, sha256.Size)
|
||||
|
||||
cid := addr.Container()
|
||||
cid.Encode(rawCID)
|
||||
cnr, err := cnrCli.Get(cmd.Context(), rawCID)
|
||||
commonCmd.ExitOnErr(cmd, "get container: %w", err)
|
||||
|
||||
putPrm := engine.PutPrm{
|
||||
Object: obj,
|
||||
IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
|
||||
}
|
||||
commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
|
||||
commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
|
||||
}
|
123 cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go Normal file
|
@ -0,0 +1,123 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
const (
|
||||
flagBatchSize = "batch-size"
|
||||
flagBatchSizeUsage = "Objects iteration batch size"
|
||||
cidFlag = "cid"
|
||||
cidFlagUsage = "Container ID"
|
||||
oidFlag = "oid"
|
||||
oidFlagUsage = "Object ID"
|
||||
walletFlag = "wallet"
|
||||
walletFlagShorthand = "w"
|
||||
walletFlagUsage = "Path to the wallet or binary key"
|
||||
addressFlag = "address"
|
||||
addressFlagUsage = "Address of wallet account"
|
||||
moveFlag = "move"
|
||||
moveFlagUsage = "Move objects from storage engine to quarantine"
|
||||
)
|
||||
|
||||
var (
|
||||
Cmd = &cobra.Command{
|
||||
Use: "zombie",
|
||||
Short: "Zombie objects related commands",
|
||||
}
|
||||
scanCmd = &cobra.Command{
|
||||
Use: "scan",
|
||||
Short: "Scan storage engine for zombie objects and move them to quarantine",
|
||||
Long: "",
|
||||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
|
||||
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
|
||||
_ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
|
||||
_ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
|
||||
_ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
|
||||
_ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
|
||||
},
|
||||
Run: scan,
|
||||
}
|
||||
listCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List zombie objects from quarantine",
|
||||
Long: "",
|
||||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
|
||||
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
|
||||
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
|
||||
},
|
||||
Run: list,
|
||||
}
|
||||
restoreCmd = &cobra.Command{
|
||||
Use: "restore",
|
||||
Short: "Restore zombie objects from quarantine",
|
||||
Long: "",
|
||||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
|
||||
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
|
||||
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
|
||||
_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
|
||||
},
|
||||
Run: restore,
|
||||
}
|
||||
removeCmd = &cobra.Command{
|
||||
Use: "remove",
|
||||
Short: "Remove zombie objects from quarantine",
|
||||
Long: "",
|
||||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
|
||||
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
|
||||
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
|
||||
_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
|
||||
},
|
||||
Run: remove,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
initScanCmd()
|
||||
initListCmd()
|
||||
initRestoreCmd()
|
||||
initRemoveCmd()
|
||||
}
|
||||
|
||||
func initScanCmd() {
|
||||
Cmd.AddCommand(scanCmd)
|
||||
|
||||
scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
|
||||
scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
|
||||
scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
|
||||
scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
|
||||
scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
|
||||
scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
|
||||
}
|
||||
|
||||
func initListCmd() {
|
||||
Cmd.AddCommand(listCmd)
|
||||
|
||||
listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
|
||||
listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
|
||||
listCmd.Flags().String(cidFlag, "", cidFlagUsage)
|
||||
}
|
||||
|
||||
func initRestoreCmd() {
|
||||
Cmd.AddCommand(restoreCmd)
|
||||
|
||||
restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
|
||||
restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
|
||||
restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
|
||||
restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
|
||||
}
|
||||
|
||||
func initRemoveCmd() {
|
||||
Cmd.AddCommand(removeCmd)
|
||||
|
||||
removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
|
||||
removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
|
||||
removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
|
||||
removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
|
||||
}
|
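Each subcommand's PreRun above binds its cobra flags into viper, which is why helpers such as getPrivateKey can read the wallet path with viper.GetString(walletFlag) instead of receiving the flag set explicitly. A minimal sketch of that pattern (the flag names and values here are illustrative, not from the diff):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		PreRun: func(cmd *cobra.Command, _ []string) {
			// After this, viper.GetString("wallet") returns the parsed flag value.
			_ = viper.BindPFlag("wallet", cmd.Flags().Lookup("wallet"))
		},
		Run: func(cmd *cobra.Command, _ []string) {
			// Read through viper, without access to the flag set.
			fmt.Println("wallet:", viper.GetString("wallet"))
		},
	}
	cmd.Flags().StringP("wallet", "w", "", "Path to the wallet or binary key")

	cmd.SetArgs([]string{"--wallet", "/path/to/wallet.json"})
	_ = cmd.Execute()
}
```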
281 cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go Normal file
|
@ -0,0 +1,281 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
|
||||
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
||||
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
|
||||
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func scan(cmd *cobra.Command, _ []string) {
|
||||
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
|
||||
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
|
||||
appCfg := config.New(configFile, configDir, config.EnvPrefix)
|
||||
batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
|
||||
if batchSize == 0 {
|
||||
commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
|
||||
}
|
||||
move, _ := cmd.Flags().GetBool(moveFlag)
|
||||
|
||||
storageEngine := newEngine(cmd, appCfg)
|
||||
morphClient := createMorphClient(cmd, appCfg)
|
||||
cnrCli := createContainerClient(cmd, morphClient)
|
||||
nmCli := createNetmapClient(cmd, morphClient)
|
||||
q := createQuarantine(cmd, storageEngine.DumpInfo())
|
||||
pk := getPrivateKey(cmd, appCfg)
|
||||
|
||||
epoch, err := nmCli.Epoch(cmd.Context())
|
||||
commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
|
||||
|
||||
nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
|
||||
commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
|
||||
|
||||
cmd.Printf("Epoch: %d\n", nm.Epoch())
|
||||
cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
|
||||
|
||||
ps := &processStatus{
|
||||
statusCount: make(map[status]uint64),
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
start := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
tick := time.NewTicker(time.Second)
|
||||
defer tick.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-cmd.Context().Done():
|
||||
return
|
||||
case <-stopCh:
|
||||
return
|
||||
case <-tick.C:
|
||||
fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
|
||||
close(stopCh)
|
||||
}()
|
||||
wg.Wait()
|
||||
commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
|
||||
|
||||
cmd.Println()
|
||||
cmd.Println("Status description:")
|
||||
cmd.Println("undefined -- nothing is clear")
|
||||
cmd.Println("found -- object is found in cluster")
|
||||
cmd.Println("quarantine -- object is not found in cluster")
|
||||
cmd.Println()
|
||||
for status, count := range ps.statusCount {
|
||||
cmd.Printf("Status: %s, Count: %d\n", status, count)
|
||||
}
|
||||
}
|
||||
|
||||
type status string
|
||||
|
||||
const (
|
||||
statusUndefined status = "undefined"
|
||||
statusFound status = "found"
|
||||
statusQuarantine status = "quarantine"
|
||||
)
|
||||
|
||||
func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
|
||||
rawCID := make([]byte, sha256.Size)
|
||||
cid := obj.Address.Container()
|
||||
cid.Encode(rawCID)
|
||||
|
||||
cnr, err := cnrCli.Get(ctx, rawCID)
|
||||
if err != nil {
|
||||
var errContainerNotFound *apistatus.ContainerNotFound
|
||||
if errors.As(err, &errContainerNotFound) {
|
||||
// Policer will deal with this object.
|
||||
return statusFound, nil
|
||||
}
|
||||
return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
|
||||
}
|
||||
nm, err := nmCli.NetMap(ctx)
|
||||
if err != nil {
|
||||
return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
|
||||
}
|
||||
|
||||
nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
|
||||
if err != nil {
|
||||
// Not enough nodes, check all netmap nodes.
|
||||
nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
|
||||
}
|
||||
|
||||
objID := obj.Address.Object()
|
||||
cnrID := obj.Address.Container()
|
||||
local := true
|
||||
raw := false
|
||||
if obj.ECInfo != nil {
|
||||
objID = obj.ECInfo.ParentID
|
||||
local = false
|
||||
raw = true
|
||||
}
|
||||
prm := clientSDK.PrmObjectHead{
|
||||
ObjectID: &objID,
|
||||
ContainerID: &cnrID,
|
||||
Local: local,
|
||||
Raw: raw,
|
||||
}
|
||||
|
||||
var ni clientCore.NodeInfo
|
||||
for i := range nodes {
|
||||
for j := range nodes[i] {
|
||||
if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
|
||||
return statusUndefined, fmt.Errorf("parse node info: %w", err)
|
||||
}
|
||||
c, err := cc.Get(ni)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
res, err := c.ObjectHead(ctx, prm)
|
||||
if err != nil {
|
||||
var errECInfo *objectSDK.ECInfoError
|
||||
if raw && errors.As(err, &errECInfo) {
|
||||
return statusFound, nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := apistatus.ErrFromStatus(res.Status()); err != nil {
|
||||
continue
|
||||
}
|
||||
return statusFound, nil
|
||||
}
|
||||
}
|
||||
|
||||
if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
|
||||
return statusFound, nil
|
||||
}
|
||||
return statusQuarantine, nil
|
||||
}
|
||||
|
||||
func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
|
||||
appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
|
||||
) error {
|
||||
cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
|
||||
DialTimeout: apiclientconfig.DialTimeout(appCfg),
|
||||
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
|
||||
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
|
||||
Key: pk,
|
||||
AllowExternal: apiclientconfig.AllowExternal(appCfg),
|
||||
})
|
||||
ctx := cmd.Context()
|
||||
|
||||
var cursor *engine.Cursor
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
var prm engine.ListWithCursorPrm
|
||||
prm.WithCursor(cursor)
|
||||
prm.WithCount(batchSize)
|
||||
|
||||
res, err := storageEngine.ListWithCursor(ctx, prm)
|
||||
if err != nil {
|
||||
if errors.Is(err, engine.ErrEndOfListing) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("list with cursor: %w", err)
|
||||
}
|
||||
|
||||
cursor = res.Cursor()
|
||||
addrList := res.AddressList()
|
||||
eg, egCtx := errgroup.WithContext(ctx)
|
||||
eg.SetLimit(int(batchSize))
|
||||
|
||||
for i := range addrList {
|
||||
addr := addrList[i]
|
||||
eg.Go(func() error {
|
||||
result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("check object %s status: %w", addr.Address, err)
|
||||
}
|
||||
ps.add(result)
|
||||
|
||||
if !move && result == statusQuarantine {
|
||||
cmd.Println(addr)
|
||||
return nil
|
||||
}
|
||||
|
||||
if result == statusQuarantine {
|
||||
return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return fmt.Errorf("process objects batch: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
|
||||
var getPrm engine.GetPrm
|
||||
getPrm.WithAddress(addr)
|
||||
res, err := storageEngine.Get(ctx, getPrm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get object %s from storage engine: %w", addr, err)
|
||||
}
|
||||
|
||||
if err := q.Put(ctx, res.Object()); err != nil {
|
||||
return fmt.Errorf("put object %s to quarantine: %w", addr, err)
|
||||
}
|
||||
|
||||
var delPrm engine.DeletePrm
|
||||
delPrm.WithForceRemoval()
|
||||
delPrm.WithAddress(addr)
|
||||
|
||||
if err = storageEngine.Delete(ctx, delPrm); err != nil {
|
||||
return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type processStatus struct {
|
||||
guard sync.RWMutex
|
||||
statusCount map[status]uint64
|
||||
count uint64
|
||||
}
|
||||
|
||||
func (s *processStatus) add(st status) {
|
||||
s.guard.Lock()
|
||||
defer s.guard.Unlock()
|
||||
s.statusCount[st]++
|
||||
s.count++
|
||||
}
|
||||
|
||||
func (s *processStatus) total() uint64 {
|
||||
s.guard.RLock()
|
||||
defer s.guard.RUnlock()
|
||||
return s.count
|
||||
}
|
|
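scanStorageEngine above lists objects in cursor batches and checks every address concurrently, using an errgroup whose limit equals the batch size, so at most batchSize HEAD requests are in flight at once. A standalone sketch of that bounded-concurrency pattern (the items and checkOne below are placeholders, not the real check):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// checkOne stands in for the per-address status check.
func checkOne(ctx context.Context, item int) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond): // simulated network round-trip
		return nil
	}
}

func main() {
	batch := []int{1, 2, 3, 4, 5, 6, 7, 8}

	eg, ctx := errgroup.WithContext(context.Background())
	eg.SetLimit(4) // cap in-flight checks, like eg.SetLimit(int(batchSize))

	for i := range batch {
		item := batch[i]
		eg.Go(func() error {
			return checkOne(ctx, item)
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("batch failed:", err)
		return
	}
	fmt.Println("batch processed")
}
```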
@ -0,0 +1,203 @@
|
|||
package zombie
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
|
||||
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
|
||||
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
|
||||
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
||||
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"github.com/spf13/cobra"
|
||||
"go.etcd.io/bbolt"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
|
||||
ngOpts := storageEngineOptions(c)
|
||||
shardOpts := shardOptions(cmd, c)
|
||||
e := engine.New(ngOpts...)
|
||||
for _, opts := range shardOpts {
|
||||
_, err := e.AddShard(cmd.Context(), opts...)
|
||||
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
|
||||
}
|
||||
commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
|
||||
commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
|
||||
return e
|
||||
}
|
||||
|
||||
func storageEngineOptions(c *config.Config) []engine.Option {
|
||||
return []engine.Option{
|
||||
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
|
||||
engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
|
||||
}
|
||||
}
|
||||
|
||||
func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
|
||||
var result [][]shard.Option
|
||||
err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
|
||||
result = append(result, getShardOpts(cmd, c, sh))
|
||||
return nil
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
|
||||
return result
|
||||
}
|
||||
|
||||
func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
|
||||
wc, wcEnabled := getWriteCacheOpts(sh)
|
||||
return []shard.Option{
|
||||
shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
shard.WithRefillMetabase(sh.RefillMetabase()),
|
||||
shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
|
||||
shard.WithMode(sh.Mode()),
|
||||
shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
|
||||
shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
|
||||
shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
|
||||
shard.WithWriteCache(wcEnabled),
|
||||
shard.WithWriteCacheOptions(wc),
|
||||
shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
|
||||
shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
|
||||
shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
|
||||
shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
|
||||
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
|
||||
pool, err := ants.NewPool(sz)
|
||||
commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
|
||||
return pool
|
||||
}),
|
||||
shard.WithLimiter(qos.NewNoopLimiter()),
|
||||
}
|
||||
}
|
||||
|
||||
func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
|
||||
if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
|
||||
var result []writecache.Option
|
||||
result = append(result,
|
||||
writecache.WithPath(wc.Path()),
|
||||
writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
|
||||
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
|
||||
writecache.WithFlushWorkersCount(wc.WorkerCount()),
|
||||
writecache.WithMaxCacheSize(wc.SizeLimit()),
|
||||
writecache.WithMaxCacheCount(wc.CountLimit()),
|
||||
writecache.WithNoSync(wc.NoSync()),
|
||||
writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
writecache.WithQoSLimiter(qos.NewNoopLimiter()),
|
||||
)
|
||||
return result, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
|
||||
var piloramaOpts []pilorama.Option
|
||||
if config.BoolSafe(c.Sub("tree"), "enabled") {
|
||||
pr := sh.Pilorama()
|
||||
piloramaOpts = append(piloramaOpts,
|
||||
pilorama.WithPath(pr.Path()),
|
||||
pilorama.WithPerm(pr.Perm()),
|
||||
pilorama.WithNoSync(pr.NoSync()),
|
||||
pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
|
||||
pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
|
||||
)
|
||||
}
|
||||
return piloramaOpts
|
||||
}
|
||||
|
||||
func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
|
||||
return []meta.Option{
|
||||
meta.WithPath(sh.Metabase().Path()),
|
||||
meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
|
||||
meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
|
||||
meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
|
||||
meta.WithBoltDBOptions(&bbolt.Options{
|
||||
Timeout: 100 * time.Millisecond,
|
||||
}),
|
||||
meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
meta.WithEpochState(&epochState{}),
|
||||
}
|
||||
}
|
||||
|
||||
func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
|
||||
result := []blobstor.Option{
|
||||
blobstor.WithCompressObjects(sh.Compress()),
|
||||
blobstor.WithUncompressableContentTypes(sh.UncompressableContentTypes()),
|
||||
blobstor.WithCompressibilityEstimate(sh.EstimateCompressibility()),
|
||||
blobstor.WithCompressibilityEstimateThreshold(sh.EstimateCompressibilityThreshold()),
|
||||
blobstor.WithStorages(getSubStorages(ctx, sh)),
|
||||
blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
|
||||
var ss []blobstor.SubStorage
|
||||
for _, storage := range sh.BlobStor().Storages() {
|
||||
switch storage.Type() {
|
||||
case blobovniczatree.Type:
|
||||
sub := blobovniczaconfig.From((*config.Config)(storage))
|
||||
blobTreeOpts := []blobovniczatree.Option{
|
||||
blobovniczatree.WithRootPath(storage.Path()),
|
||||
blobovniczatree.WithPermissions(storage.Perm()),
|
||||
blobovniczatree.WithBlobovniczaSize(sub.Size()),
|
||||
blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
|
||||
blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
|
||||
blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
|
||||
blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
|
||||
blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
|
||||
blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
|
||||
blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
|
||||
blobovniczatree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
|
||||
}
|
||||
|
||||
ss = append(ss, blobstor.SubStorage{
|
||||
Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
|
||||
Policy: func(_ *objectSDK.Object, data []byte) bool {
|
||||
return uint64(len(data)) < sh.SmallSizeLimit()
|
||||
},
|
||||
})
|
||||
case fstree.Type:
|
||||
sub := fstreeconfig.From((*config.Config)(storage))
|
||||
fstreeOpts := []fstree.Option{
|
||||
fstree.WithPath(storage.Path()),
|
||||
fstree.WithPerm(storage.Perm()),
|
||||
fstree.WithDepth(sub.Depth()),
|
||||
fstree.WithNoSync(sub.NoSync()),
|
||||
fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
|
||||
}
|
||||
|
||||
ss = append(ss, blobstor.SubStorage{
|
||||
Storage: fstree.New(fstreeOpts...),
|
||||
Policy: func(_ *objectSDK.Object, _ []byte) bool {
|
||||
return true
|
||||
},
|
||||
})
|
||||
default:
|
||||
// should never happen, that has already
|
||||
// been handled: when the config was read
|
||||
}
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
type epochState struct{}
|
||||
|
||||
func (epochState) CurrentEpoch() uint64 {
|
||||
return 0
|
||||
}
|
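getSubStorages above wires each shard's blobstor with two sub-storages and a size-based write policy: objects under the shard's small-size limit land in the blobovnicza tree, everything else in the fstree. A simplified, self-contained sketch of that selection follows; the types, names, and the 1 MiB limit are assumptions for illustration.

```go
package main

import "fmt"

type subStorage struct {
	name   string
	policy func(size uint64) bool
}

// pick returns the first sub-storage whose policy accepts the object,
// which is how blobstor chooses where to write.
func pick(storages []subStorage, size uint64) string {
	for _, s := range storages {
		if s.policy(size) {
			return s.name
		}
	}
	return ""
}

func main() {
	const smallSizeLimit = 1 << 20 // hypothetical 1 MiB small-object limit

	storages := []subStorage{
		{name: "blobovnicza", policy: func(size uint64) bool { return size < smallSizeLimit }},
		{name: "fstree", policy: func(uint64) bool { return true }},
	}

	fmt.Println(pick(storages, 4<<10))  // blobovnicza
	fmt.Println(pick(storages, 32<<20)) // fstree
}
```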
|
@ -9,6 +9,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
|
@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
|
|||
helper.GetAlphabetNNSDomain(i),
|
||||
int64(nns.TXT))
|
||||
}
|
||||
if w.Err != nil {
|
||||
panic(w.Err)
|
||||
}
|
||||
assert.NoError(w.Err)
|
||||
|
||||
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
|
||||
if err != nil {
|
||||
|
@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
|
|||
for i := range accounts {
|
||||
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
|
||||
}
|
||||
if w.Err != nil {
|
||||
panic(w.Err)
|
||||
}
|
||||
assert.NoError(w.Err)
|
||||
|
||||
res, err := c.Run(w.Bytes())
|
||||
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
|
||||
|
|
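The hunks above and below replace open-coded `if w.Err != nil { panic(...) }` checks on script writers with assert.NoError from the new internal/assert package. That package is not shown in this diff; a hypothetical minimal version consistent with these call sites could look like the sketch below.

```go
// Hypothetical minimal helper; the real implementation lives in
// git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert and may differ.
package assert

import (
	"fmt"
	"strings"
)

// NoError panics if err is non-nil, prefixing the panic with optional details.
func NoError(err error, details ...string) {
	if err == nil {
		return
	}
	if len(details) == 0 {
		panic(err)
	}
	panic(fmt.Errorf("%s: %w", strings.Join(details, " "), err))
}
```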
|
@ -10,6 +10,7 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
|
@ -235,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
|
|||
|
||||
putContainer(bw, ch, cnt)
|
||||
|
||||
if bw.Err != nil {
|
||||
panic(bw.Err)
|
||||
}
|
||||
assert.NoError(bw.Err)
|
||||
|
||||
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
|
||||
return err
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
"github.com/nspcc-dev/neo-go/cli/cmdargs"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
|
@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
|
|||
}
|
||||
}
|
||||
|
||||
if writer.Err != nil {
|
||||
panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
|
||||
}
|
||||
assert.NoError(writer.Err, "can't create deployment script")
|
||||
|
||||
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
|
||||
return err
|
||||
|
@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
|
|||
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
|
||||
}
|
||||
|
||||
if bw.Err != nil {
|
||||
panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
|
||||
} else if bw.Len() != start {
|
||||
assert.NoError(bw.Err, "can't create deployment script")
|
||||
if bw.Len() != start {
|
||||
writer.WriteBytes(bw.Bytes())
|
||||
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
|
||||
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
|
@ -236,21 +237,17 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
|
|||
} else {
|
||||
sub.Reset()
|
||||
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
|
||||
if sub.Err != nil {
|
||||
panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
|
||||
}
|
||||
assert.NoError(sub.Err, "can't create version script")
|
||||
|
||||
script := sub.Bytes()
|
||||
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
|
||||
bw.BinWriter.WriteBytes(script)
|
||||
bw.WriteBytes(script)
|
||||
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
|
||||
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
|
||||
}
|
||||
}
|
||||
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
|
||||
if bw.Err != nil {
|
||||
panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
|
||||
}
|
||||
assert.NoError(bw.Err, "can't create version script")
|
||||
|
||||
res, err := c.InvokeScript(bw.Bytes(), nil)
|
||||
if err != nil {
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
|
||||
nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
|
@ -13,9 +14,7 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -187,19 +186,9 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
|
|||
}
|
||||
|
||||
func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
|
||||
switch c.(type) {
|
||||
case *rpcclient.Client:
|
||||
inv := invoker.New(c, nil)
|
||||
reader := nns2.NewReader(inv, nnsHash)
|
||||
return reader.IsAvailable(name)
|
||||
default:
|
||||
b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
inv := invoker.New(c, nil)
|
||||
reader := nns2.NewReader(inv, nnsHash)
|
||||
return reader.IsAvailable(name)
|
||||
}
|
||||
|
||||
func CheckNotaryEnabled(c Client) error {
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
|
@ -21,6 +22,7 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
|
||||
|
@ -28,7 +30,6 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -375,9 +376,7 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen
|
|||
}
|
||||
act, err = actor.New(c.Client, signers)
|
||||
} else {
|
||||
if withConsensus {
|
||||
panic("BUG: should never happen")
|
||||
}
|
||||
assert.False(withConsensus, "BUG: should never happen")
|
||||
act, err = c.CommitteeAct, nil
|
||||
}
|
||||
if err != nil {
|
||||
|
@ -411,11 +410,9 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp
|
|||
|
||||
func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
|
||||
version, err := c.Client.GetVersion()
|
||||
if err != nil {
|
||||
// error appears only if client
|
||||
// has not been initialized
|
||||
panic(err)
|
||||
}
|
||||
// error appears only if client
|
||||
// has not been initialized
|
||||
assert.NoError(err)
|
||||
network := version.Protocol.Network
|
||||
|
||||
// Use parameter context to avoid dealing with signature order.
|
||||
|
@ -447,12 +444,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin
|
|||
|
||||
for i := range tx.Signers {
|
||||
if tx.Signers[i].Account == h {
|
||||
assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
|
||||
if i < len(tx.Scripts) {
|
||||
tx.Scripts[i] = *w
|
||||
} else if i == len(tx.Scripts) {
|
||||
}
|
||||
if i == len(tx.Scripts) {
|
||||
tx.Scripts = append(tx.Scripts, *w)
|
||||
} else {
|
||||
panic("BUG: invalid signing order")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -510,9 +507,7 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
|
|||
int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
|
||||
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
|
||||
|
||||
if bw.Err != nil {
|
||||
panic(bw.Err)
|
||||
}
|
||||
assert.NoError(bw.Err)
|
||||
return bw.Bytes(), false, nil
|
||||
}
|
||||
|
||||
|
@ -524,12 +519,8 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
|
|||
}
|
||||
|
||||
func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
|
||||
res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return res.State == vmstate.Halt.String(), nil
|
||||
avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
|
||||
return !avail, err
|
||||
}
|
||||
|
||||
func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
"github.com/google/uuid"
|
||||
"github.com/nspcc-dev/neo-go/pkg/config"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core"
|
||||
|
@ -316,9 +317,7 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint
|
|||
func (l *LocalClient) putTransactions() error {
|
||||
// 1. Prepare new block.
|
||||
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
assert.NoError(err)
|
||||
defer func() { l.transactions = l.transactions[:0] }()
|
||||
|
||||
b := &block.Block{
|
||||
|
@ -359,9 +358,7 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s
|
|||
w := io.NewBufBinWriter()
|
||||
emit.Array(w.BinWriter, parameters...)
|
||||
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
|
||||
if w.Err != nil {
|
||||
panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
|
||||
}
|
||||
assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
|
||||
return c.InvokeScript(w.Bytes(), signers)
|
||||
}
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
|
@ -111,9 +112,7 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b
|
|||
emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
|
||||
emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
|
||||
|
||||
if w.Err != nil {
|
||||
panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
|
||||
}
|
||||
assert.NoError(w.Err, "can't wrap register script")
|
||||
}
|
||||
|
||||
func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {
|
||||
|
|
|
@ -1,21 +1,18 @@
|
|||
package initialize
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/native"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -30,7 +27,8 @@ const (
|
|||
)
|
||||
|
||||
func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
|
||||
regPrice, err := getCandidateRegisterPrice(c)
|
||||
reader := neo.NewReader(c.ReadOnlyInvoker)
|
||||
regPrice, err := reader.GetRegisterPrice()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't fetch registration price: %w", err)
|
||||
}
|
||||
|
@ -42,9 +40,7 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
|
|||
emit.Opcodes(w.BinWriter, opcode.ASSERT)
|
||||
}
|
||||
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
|
||||
if w.Err != nil {
|
||||
panic(fmt.Sprintf("BUG: %v", w.Err))
|
||||
}
|
||||
assert.NoError(w.Err)
|
||||
|
||||
signers := []actor.SignerAccount{{
|
||||
Signer: c.GetSigner(false, c.CommitteeAcc),
|
||||
|
@ -116,7 +112,7 @@ func registerCandidates(c *helper.InitializeContext) error {
|
|||
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
|
||||
neoHash := neo.Hash
|
||||
|
||||
ok, err := transferNEOFinished(c, neoHash)
|
||||
ok, err := transferNEOFinished(c)
|
||||
if ok || err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -139,33 +135,8 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
|
|||
return c.AwaitTx()
|
||||
}
|
||||
|
||||
func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
|
||||
r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
|
||||
func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
|
||||
r := neo.NewReader(c.ReadOnlyInvoker)
|
||||
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
|
||||
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
|
||||
}
|
||||
|
||||
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
|
||||
|
||||
func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
|
||||
switch c.Client.(type) {
|
||||
case *rpcclient.Client:
|
||||
inv := invoker.New(c.Client, nil)
|
||||
reader := neo.NewReader(inv)
|
||||
return reader.GetRegisterPrice()
|
||||
default:
|
||||
neoHash := neo.Hash
|
||||
res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(res.Stack) == 0 {
|
||||
return 0, errGetPriceInvalid
|
||||
}
|
||||
bi, err := res.Stack[0].TryInteger()
|
||||
if err != nil || !bi.IsInt64() {
|
||||
return 0, errGetPriceInvalid
|
||||
}
|
||||
return bi.Int64(), nil
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,9 +5,9 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
|
||||
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
|
||||
|
@ -41,8 +41,8 @@ func init() {
|
|||
|
||||
rootCmd.AddCommand(config.RootCmd)
|
||||
rootCmd.AddCommand(morph.RootCmd)
|
||||
rootCmd.AddCommand(storagecfg.RootCmd)
|
||||
rootCmd.AddCommand(metabase.RootCmd)
|
||||
rootCmd.AddCommand(maintenance.RootCmd)
|
||||
|
||||
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
|
||||
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
|
||||
|
|
|
@ -1,135 +0,0 @@
|
|||
package storagecfg
|
||||
|
||||
const configTemplate = `logger:
|
||||
level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
|
||||
|
||||
node:
|
||||
wallet:
|
||||
path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
|
||||
address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
|
||||
password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
|
||||
addresses: # list of addresses announced by Storage node in the Network map
|
||||
- {{ .AnnouncedAddress }}
|
||||
attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
|
||||
relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
|
||||
|
||||
grpc:
|
||||
num: 1 # total number of listener endpoints
|
||||
0:
|
||||
endpoint: {{ .Endpoint }} # endpoint for gRPC server
|
||||
tls:{{if .TLSCert}}
|
||||
enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
|
||||
certificate: {{ .TLSCert }} # path to TLS certificate
|
||||
key: {{ .TLSKey }} # path to TLS key
|
||||
{{- else }}
|
||||
enabled: false # disable TLS for a gRPC connection
|
||||
{{- end}}
|
||||
|
||||
control:
|
||||
authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
|
||||
{{- range .AuthorizedKeys }}
|
||||
- {{.}}{{end}}
|
||||
grpc:
|
||||
endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
|
||||
|
||||
morph:
|
||||
dial_timeout: 20s # timeout for side chain NEO RPC client connection
|
||||
cache_ttl: 15s # use TTL cache for side chain GET operations
|
||||
rpc_endpoint: # side chain N3 RPC endpoints
|
||||
{{- range .MorphRPC }}
|
||||
- address: wss://{{.}}/ws{{end}}
|
||||
{{if not .Relay }}
|
||||
storage:
|
||||
shard:
|
||||
default: # section with the default shard parameters
|
||||
metabase:
|
||||
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
|
||||
|
||||
blobstor:
|
||||
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
|
||||
depth: 2 # max depth of object tree storage in FS
|
||||
small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
|
||||
compress: true # turn on/off Zstandard compression (level 3) of stored objects
|
||||
compression_exclude_content_types:
|
||||
- audio/*
|
||||
- video/*
|
||||
|
||||
blobovnicza:
|
||||
size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
|
||||
depth: 1 # max depth of object tree storage in key-value DB
|
||||
width: 4 # max width of object tree storage in key-value DB
|
||||
opened_cache_capacity: 50 # maximum number of opened database files
|
||||
opened_cache_ttl: 5m # ttl for opened database file
|
||||
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
|
||||
|
||||
gc:
|
||||
remover_batch_size: 200 # number of objects to be removed by the garbage collector
|
||||
remover_sleep_interval: 5m # frequency of the garbage collector invocation
|
||||
0:
|
||||
mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
|
||||
|
||||
metabase:
|
||||
path: {{ .MetabasePath }} # path to the metabase
|
||||
|
||||
blobstor:
|
||||
path: {{ .BlobstorPath }} # path to the blobstor
|
||||
{{end}}`
|
||||
|
||||
const (
|
||||
neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
|
||||
balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
|
||||
neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
|
||||
balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
|
||||
)
|
||||
|
||||
var n3config = map[string]struct {
|
||||
MorphRPC []string
|
||||
RPC []string
|
||||
NeoFSContract string
|
||||
BalanceContract string
|
||||
}{
|
||||
"testnet": {
|
||||
MorphRPC: []string{
|
||||
"rpc01.morph.testnet.fs.neo.org:51331",
|
||||
"rpc02.morph.testnet.fs.neo.org:51331",
|
||||
"rpc03.morph.testnet.fs.neo.org:51331",
|
||||
"rpc04.morph.testnet.fs.neo.org:51331",
|
||||
"rpc05.morph.testnet.fs.neo.org:51331",
|
||||
"rpc06.morph.testnet.fs.neo.org:51331",
|
||||
"rpc07.morph.testnet.fs.neo.org:51331",
|
||||
},
|
||||
RPC: []string{
|
||||
"rpc01.testnet.n3.nspcc.ru:21331",
|
||||
"rpc02.testnet.n3.nspcc.ru:21331",
|
||||
"rpc03.testnet.n3.nspcc.ru:21331",
|
||||
"rpc04.testnet.n3.nspcc.ru:21331",
|
||||
"rpc05.testnet.n3.nspcc.ru:21331",
|
||||
"rpc06.testnet.n3.nspcc.ru:21331",
|
||||
"rpc07.testnet.n3.nspcc.ru:21331",
|
||||
},
|
||||
NeoFSContract: neofsTestnetAddress,
|
||||
BalanceContract: balanceTestnetAddress,
|
||||
},
|
||||
"mainnet": {
|
||||
MorphRPC: []string{
|
||||
"rpc1.morph.fs.neo.org:40341",
|
||||
"rpc2.morph.fs.neo.org:40341",
|
||||
"rpc3.morph.fs.neo.org:40341",
|
||||
"rpc4.morph.fs.neo.org:40341",
|
||||
"rpc5.morph.fs.neo.org:40341",
|
||||
"rpc6.morph.fs.neo.org:40341",
|
||||
"rpc7.morph.fs.neo.org:40341",
|
||||
},
|
||||
RPC: []string{
|
||||
"rpc1.n3.nspcc.ru:10331",
|
||||
"rpc2.n3.nspcc.ru:10331",
|
||||
"rpc3.n3.nspcc.ru:10331",
|
||||
"rpc4.n3.nspcc.ru:10331",
|
||||
"rpc5.n3.nspcc.ru:10331",
|
||||
"rpc6.n3.nspcc.ru:10331",
|
||||
"rpc7.n3.nspcc.ru:10331",
|
||||
},
|
||||
NeoFSContract: neofsMainnetAddress,
|
||||
BalanceContract: balanceMainnetAddress,
|
||||
},
|
||||
}
|
|
@ -1,432 +0,0 @@
|
|||
package storagecfg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
|
||||
"github.com/chzyer/readline"
|
||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||
"github.com/nspcc-dev/neo-go/cli/input"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
walletFlag = "wallet"
|
||||
accountFlag = "account"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultControlEndpoint = "localhost:8090"
|
||||
defaultDataEndpoint = "localhost"
|
||||
)
|
||||
|
||||
// RootCmd is a root command of config section.
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
|
||||
Short: "Section for storage node configuration commands",
|
||||
Run: storageConfig,
|
||||
}
|
||||
|
||||
func init() {
|
||||
fs := RootCmd.Flags()
|
||||
|
||||
fs.StringP(walletFlag, "w", "", "Path to wallet")
|
||||
fs.StringP(accountFlag, "a", "", "Wallet account")
|
||||
}
|
||||
|
||||
type config struct {
|
||||
AnnouncedAddress string
|
||||
AuthorizedKeys []string
|
||||
ControlEndpoint string
|
||||
Endpoint string
|
||||
TLSCert string
|
||||
TLSKey string
|
||||
MorphRPC []string
|
||||
Attribute struct {
|
||||
Locode string
|
||||
}
|
||||
Wallet struct {
|
||||
Path string
|
||||
Account string
|
||||
Password string
|
||||
}
|
||||
Relay bool
|
||||
BlobstorPath string
|
||||
MetabasePath string
|
||||
}
|
||||
|
||||
func storageConfig(cmd *cobra.Command, args []string) {
|
||||
outPath := getOutputPath(args)
|
||||
|
||||
historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
|
||||
readline.SetHistoryPath(historyPath)
|
||||
|
||||
var c config
|
||||
|
||||
c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
|
||||
if c.Wallet.Path == "" {
|
||||
c.Wallet.Path = getPath("Path to the storage node wallet: ")
|
||||
}
|
||||
|
||||
w, err := wallet.NewWalletFromFile(c.Wallet.Path)
|
||||
fatalOnErr(err)
|
||||
|
||||
fillWalletAccount(cmd, &c, w)
|
||||
|
||||
accH, err := flags.ParseAddress(c.Wallet.Account)
|
||||
fatalOnErr(err)
|
||||
|
||||
acc := w.GetAccount(accH)
|
||||
if acc == nil {
|
||||
fatalOnErr(errors.New("can't find account in wallet"))
|
||||
}
|
||||
|
||||
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
|
||||
fatalOnErr(err)
|
||||
|
||||
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
|
||||
fatalOnErr(err)
|
||||
|
||||
c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
|
||||
|
||||
network := readNetwork(cmd)
|
||||
|
||||
c.MorphRPC = n3config[network].MorphRPC
|
||||
|
||||
depositGas(cmd, acc, network)
|
||||
|
||||
c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
|
||||
|
||||
endpoint := getDefaultEndpoint(cmd, &c)
|
||||
c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
|
||||
if c.Endpoint == "" {
|
||||
c.Endpoint = endpoint
|
||||
}
|
||||
|
||||
c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
|
||||
if c.ControlEndpoint == "" {
|
||||
c.ControlEndpoint = defaultControlEndpoint
|
||||
}
|
||||
|
||||
c.TLSCert = getPath("TLS Certificate (optional): ")
|
||||
if c.TLSCert != "" {
|
||||
c.TLSKey = getPath("TLS Key: ")
|
||||
}
|
||||
|
||||
c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
|
||||
if !c.Relay {
|
||||
p := getPath("Path to the storage directory (all available storage will be used): ")
|
||||
c.BlobstorPath = filepath.Join(p, "blob")
|
||||
c.MetabasePath = filepath.Join(p, "meta")
|
||||
}
|
||||
|
||||
out := applyTemplate(c)
|
||||
fatalOnErr(os.WriteFile(outPath, out, 0o644))
|
||||
|
||||
cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
|
||||
}
|
||||
|
||||
func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
|
||||
var addr, port string
|
||||
for {
|
||||
c.AnnouncedAddress = getString("Publicly announced address: ")
|
||||
validator := netutil.Address{}
|
||||
err := validator.FromString(c.AnnouncedAddress)
|
||||
if err != nil {
|
||||
cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
|
||||
continue
|
||||
}
|
||||
uriAddr, err := url.Parse(validator.URIAddr())
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unexpected error: %w", err))
|
||||
}
|
||||
addr = uriAddr.Hostname()
|
||||
port = uriAddr.Port()
|
||||
ip, err := net.ResolveIPAddr("ip", addr)
|
||||
if err != nil {
|
||||
cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !ip.IP.IsGlobalUnicast() {
|
||||
cmd.Println("IP must be global unicast.")
|
||||
continue
|
||||
}
|
||||
cmd.Printf("Resolved IP address: %s\n", ip.String())
|
||||
|
||||
_, err = strconv.ParseUint(port, 10, 16)
|
||||
if err != nil {
|
||||
cmd.Println("Port must be an integer.")
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
return net.JoinHostPort(defaultDataEndpoint, port)
|
||||
}
|
||||
|
||||
func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
|
||||
c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
|
||||
if c.Wallet.Account == "" {
|
||||
addr := address.Uint160ToString(w.GetChangeAddress())
|
||||
c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
|
||||
if c.Wallet.Account == "" {
|
||||
c.Wallet.Account = addr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readNetwork(cmd *cobra.Command) string {
|
||||
var network string
|
||||
for {
|
||||
network = getString("Choose network [mainnet]/testnet: ")
|
||||
switch network {
|
||||
case "":
|
||||
network = "mainnet"
|
||||
case "testnet", "mainnet":
|
||||
default:
|
||||
cmd.Println(`Network must be either "mainnet" or "testnet"`)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
return network
|
||||
}
|
||||
|
||||
func getOutputPath(args []string) string {
|
||||
if len(args) != 0 {
|
||||
return args[0]
|
||||
}
|
||||
outPath := getPath("File to write config at [./config.yml]: ")
|
||||
if outPath == "" {
|
||||
outPath = "./config.yml"
|
||||
}
|
||||
return outPath
|
||||
}
|
||||
|
||||
func getWalletAccount(w *wallet.Wallet, prompt string) string {
|
||||
addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
|
||||
for i := range w.Accounts {
|
||||
addrs[i] = readline.PcItem(w.Accounts[i].Address)
|
||||
}
|
||||
|
||||
readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
|
||||
defer readline.SetAutoComplete(nil)
|
||||
|
||||
s, err := readline.Line(prompt)
|
||||
fatalOnErr(err)
|
||||
return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
|
||||
}
|
||||
|
||||
func getString(prompt string) string {
|
||||
s, err := readline.Line(prompt)
|
||||
fatalOnErr(err)
|
||||
if s != "" {
|
||||
_ = readline.AddHistory(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type filenameCompleter struct{}
|
||||
|
||||
func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
|
||||
prefix := string(line[:pos])
|
||||
dir := filepath.Dir(prefix)
|
||||
de, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
for i := range de {
|
||||
name := filepath.Join(dir, de[i].Name())
|
||||
if strings.HasPrefix(name, prefix) {
|
||||
tail := []rune(strings.TrimPrefix(name, prefix))
|
||||
if de[i].IsDir() {
|
||||
tail = append(tail, filepath.Separator)
|
||||
}
|
||||
newLine = append(newLine, tail)
|
||||
}
|
||||
}
|
||||
if pos != 0 {
|
||||
return newLine, pos - len([]rune(dir))
|
||||
}
|
||||
return newLine, 0
|
||||
}
|
||||
|
||||
func getPath(prompt string) string {
|
||||
readline.SetAutoComplete(filenameCompleter{})
|
||||
defer readline.SetAutoComplete(nil)
|
||||
|
||||
p, err := readline.Line(prompt)
|
||||
fatalOnErr(err)
|
||||
|
||||
if p == "" {
|
||||
return p
|
||||
}
|
||||
|
||||
_ = readline.AddHistory(p)
|
||||
|
||||
abs, err := filepath.Abs(p)
|
||||
if err != nil {
|
||||
fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
|
||||
}
|
||||
|
||||
return abs
|
||||
}
|
||||
|
||||
func getConfirmation(def bool, prompt string) bool {
|
||||
for {
|
||||
s, err := readline.Line(prompt)
|
||||
fatalOnErr(err)
|
||||
|
||||
switch strings.ToLower(s) {
|
||||
case "y", "yes":
|
||||
return true
|
||||
case "n", "no":
|
||||
return false
|
||||
default:
|
||||
if len(s) == 0 {
|
||||
return def
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func applyTemplate(c config) []byte {
|
||||
tmpl, err := template.New("config").Parse(configTemplate)
|
||||
fatalOnErr(err)
|
||||
|
||||
b := bytes.NewBuffer(nil)
|
||||
fatalOnErr(tmpl.Execute(b, c))
|
||||
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func fatalOnErr(err error) {
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
|
||||
sideClient := initClient(n3config[network].MorphRPC)
|
||||
balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
|
||||
|
||||
sideActor, err := actor.NewSimple(sideClient, acc)
|
||||
if err != nil {
|
||||
fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
|
||||
}
|
||||
|
||||
sideGas := nep17.NewReader(sideActor, balanceHash)
|
||||
accSH := acc.Contract.ScriptHash()
|
||||
|
||||
balance, err := sideGas.BalanceOf(accSH)
|
||||
if err != nil {
|
||||
fatalOnErr(fmt.Errorf("side chain balance: %w", err))
|
||||
}
|
||||
|
||||
ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
|
||||
fixedn.ToString(balance, 12)))
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
amountStr := getString("Enter amount in GAS: ")
|
||||
amount, err := fixedn.FromString(amountStr, 8)
|
||||
if err != nil {
|
||||
fatalOnErr(fmt.Errorf("invalid amount: %w", err))
|
||||
}
|
||||
|
||||
mainClient := initClient(n3config[network].RPC)
|
||||
neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
|
||||
|
||||
mainActor, err := actor.NewSimple(mainClient, acc)
|
||||
if err != nil {
|
||||
fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
|
||||
}
|
||||
|
||||
mainGas := nep17.New(mainActor, gas.Hash)
|
||||
|
||||
txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
|
||||
if err != nil {
|
||||
fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
|
||||
}
|
||||
|
||||
cmd.Print("Waiting for transactions to persist.")
|
||||
tick := time.NewTicker(time.Second / 2)
|
||||
defer tick.Stop()
|
||||
|
||||
timer := time.NewTimer(time.Second * 20)
|
||||
defer timer.Stop()
|
||||
|
||||
at := trigger.Application
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-tick.C:
|
||||
_, err := mainClient.GetApplicationLog(txHash, &at)
|
||||
if err == nil {
|
||||
cmd.Print("\n")
|
||||
break loop
|
||||
}
|
||||
cmd.Print(".")
|
||||
case <-timer.C:
|
||||
cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
|
||||
if getConfirmation(false, "Continue configuration? yes/[no]: ") {
|
||||
return
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func initClient(rpc []string) *rpcclient.Client {
|
||||
var c *rpcclient.Client
|
||||
var err error
|
||||
|
||||
shuffled := slices.Clone(rpc)
|
||||
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
|
||||
|
||||
for _, endpoint := range shuffled {
|
||||
c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
|
||||
DialTimeout: time.Second * 2,
|
||||
RequestTimeout: time.Second * 5,
|
||||
})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if err = c.Init(); err != nil {
|
||||
continue
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
|
||||
panic("unreachable")
|
||||
}
|
|
@ -858,6 +858,8 @@ type PatchObjectPrm struct {
|
|||
|
||||
ReplaceAttribute bool
|
||||
|
||||
NewSplitHeader *objectSDK.SplitHeader
|
||||
|
||||
PayloadPatches []PayloadPatch
|
||||
}
|
||||
|
||||
|
@ -888,7 +890,11 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
|
|||
return nil, fmt.Errorf("init payload reading: %w", err)
|
||||
}
|
||||
|
||||
if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
|
||||
if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
|
||||
NewSplitHeader: prm.NewSplitHeader,
|
||||
NewAttributes: prm.NewAttributes,
|
||||
ReplaceAttributes: prm.ReplaceAttribute,
|
||||
}) {
|
||||
for _, pp := range prm.PayloadPatches {
|
||||
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
|
||||
if err != nil {
|
||||
|
|
|
@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
|
|||
prmDial := client.PrmDial{
|
||||
Endpoint: addr.URIAddr(),
|
||||
GRPCDialOptions: []grpc.DialOption{
|
||||
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
|
||||
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
|
||||
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
|
||||
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
|
||||
},
|
||||
|
|
|
@ -44,6 +44,7 @@ is set to current epoch + n.
|
|||
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
|
||||
_ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
|
||||
_ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC))
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -81,7 +82,7 @@ func createToken(cmd *cobra.Command, _ []string) {
|
|||
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
|
||||
|
||||
if iatRelative || expRelative || nvbRelative {
|
||||
endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
|
||||
endpoint := viper.GetString(commonflags.RPC)
|
||||
if len(endpoint) == 0 {
|
||||
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
|
||||
}
|
||||
|
|
|
@ -5,7 +5,9 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
|
@ -19,8 +21,9 @@ import (
|
|||
)
|
||||
|
||||
type policyPlaygroundREPL struct {
|
||||
cmd *cobra.Command
|
||||
nodes map[string]netmap.NodeInfo
|
||||
cmd *cobra.Command
|
||||
nodes map[string]netmap.NodeInfo
|
||||
console *readline.Instance
|
||||
}
|
||||
|
||||
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
|
||||
|
@ -40,7 +43,7 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
|
|||
node.IterateAttributes(func(k, v string) {
|
||||
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
|
||||
})
|
||||
fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
|
||||
fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
|
||||
i++
|
||||
}
|
||||
return nil
|
||||
|
@ -147,12 +150,29 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error {
|
|||
for _, node := range ns {
|
||||
ids = append(ids, hex.EncodeToString(node.PublicKey()))
|
||||
}
|
||||
fmt.Printf("\t%2d: %v\n", i+1, ids)
|
||||
fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repl *policyPlaygroundREPL) handleHelp(args []string) error {
|
||||
if len(args) != 0 {
|
||||
if _, ok := commands[args[0]]; !ok {
|
||||
return fmt.Errorf("unknown command: %q", args[0])
|
||||
}
|
||||
fmt.Fprintln(repl.console, commands[args[0]].usage)
|
||||
return nil
|
||||
}
|
||||
|
||||
commandList := slices.Collect(maps.Keys(commands))
|
||||
slices.Sort(commandList)
|
||||
for _, command := range commandList {
|
||||
fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
|
||||
var nm netmap.NetMap
|
||||
var nodes []netmap.NodeInfo
|
||||
|
@ -163,15 +183,104 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
|
|||
return nm
|
||||
}
|
||||
|
||||
var policyPlaygroundCompleter = readline.NewPrefixCompleter(
|
||||
readline.PcItem("list"),
|
||||
readline.PcItem("ls"),
|
||||
readline.PcItem("add"),
|
||||
readline.PcItem("load"),
|
||||
readline.PcItem("remove"),
|
||||
readline.PcItem("rm"),
|
||||
readline.PcItem("eval"),
|
||||
)
|
||||
type commandDescription struct {
|
||||
descriprion string
|
||||
usage string
|
||||
}
|
||||
|
||||
var commands = map[string]commandDescription{
|
||||
"list": {
|
||||
descriprion: "Display all nodes in the netmap",
|
||||
usage: `Display all nodes in the netmap
|
||||
Example of usage:
|
||||
list
|
||||
1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
|
||||
2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
|
||||
`,
|
||||
},
|
||||
|
||||
"ls": {
|
||||
descriprion: "Display all nodes in the netmap",
|
||||
usage: `Display all nodes in the netmap
|
||||
Example of usage:
|
||||
ls
|
||||
1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
|
||||
2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
|
||||
`,
|
||||
},
|
||||
|
||||
"add": {
|
||||
descriprion: "Add a new node: add <node-hash> attr=value",
|
||||
usage: `Add a new node
|
||||
Example of usage:
|
||||
add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`,
|
||||
},
|
||||
|
||||
"load": {
|
||||
descriprion: "Load netmap from file: load <path>",
|
||||
usage: `Load netmap from file
|
||||
Example of usage:
|
||||
load "netmap.json"
|
||||
File format (netmap.json):
|
||||
{
|
||||
"03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
|
||||
"continent": "Europe",
|
||||
"country": "Poland"
|
||||
},
|
||||
"02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
|
||||
"continent": "Antarctica",
|
||||
"country": "Heard Island"
|
||||
}
|
||||
}`,
|
||||
},
|
||||
|
||||
"remove": {
|
||||
descriprion: "Remove a node: remove <node-hash>",
|
||||
usage: `Remove a node
|
||||
Example of usage:
|
||||
remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
|
||||
},
|
||||
|
||||
"rm": {
|
||||
descriprion: "Remove a node: rm <node-hash>",
|
||||
usage: `Remove a node
|
||||
Example of usage:
|
||||
rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
|
||||
},
|
||||
|
||||
"eval": {
|
||||
descriprion: "Evaluate a policy: eval <policy>",
|
||||
usage: `Evaluate a policy
|
||||
Example of usage:
|
||||
eval REP 2`,
|
||||
},
|
||||
|
||||
"help": {
|
||||
descriprion: "Show available commands",
|
||||
},
|
||||
}
|
||||
|
||||
func (repl *policyPlaygroundREPL) handleCommand(args []string) error {
|
||||
if len(args) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch args[0] {
|
||||
case "list", "ls":
|
||||
return repl.handleLs(args[1:])
|
||||
case "add":
|
||||
return repl.handleAdd(args[1:])
|
||||
case "load":
|
||||
return repl.handleLoad(args[1:])
|
||||
case "remove", "rm":
|
||||
return repl.handleRemove(args[1:])
|
||||
case "eval":
|
||||
return repl.handleEval(args[1:])
|
||||
case "help":
|
||||
return repl.handleHelp(args[1:])
|
||||
}
|
||||
return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0])
|
||||
}
|
||||
|
||||
func (repl *policyPlaygroundREPL) run() error {
|
||||
if len(viper.GetString(commonflags.RPC)) > 0 {
|
||||
|
@ -190,24 +299,32 @@ func (repl *policyPlaygroundREPL) run() error {
|
|||
}
|
||||
}
|
||||
|
||||
cmdHandlers := map[string]func([]string) error{
|
||||
"list": repl.handleLs,
|
||||
"ls": repl.handleLs,
|
||||
"add": repl.handleAdd,
|
||||
"load": repl.handleLoad,
|
||||
"remove": repl.handleRemove,
|
||||
"rm": repl.handleRemove,
|
||||
"eval": repl.handleEval,
|
||||
if len(viper.GetString(netmapConfigPath)) > 0 {
|
||||
err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)})
|
||||
commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err)
|
||||
}
|
||||
|
||||
var cfgCompleter []readline.PrefixCompleterInterface
|
||||
var helpSubItems []readline.PrefixCompleterInterface
|
||||
|
||||
for name := range commands {
|
||||
if name != "help" {
|
||||
cfgCompleter = append(cfgCompleter, readline.PcItem(name))
|
||||
helpSubItems = append(helpSubItems, readline.PcItem(name))
|
||||
}
|
||||
}
|
||||
|
||||
cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...))
|
||||
completer := readline.NewPrefixCompleter(cfgCompleter...)
|
||||
rl, err := readline.NewEx(&readline.Config{
|
||||
Prompt: "> ",
|
||||
InterruptPrompt: "^C",
|
||||
AutoComplete: policyPlaygroundCompleter,
|
||||
AutoComplete: completer,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initializing readline: %w", err)
|
||||
}
|
||||
repl.console = rl
|
||||
defer rl.Close()
|
||||
|
||||
var exit bool
|
||||
|
@ -225,17 +342,8 @@ func (repl *policyPlaygroundREPL) run() error {
|
|||
}
|
||||
exit = false
|
||||
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) == 0 {
|
||||
continue
|
||||
}
|
||||
cmd := parts[0]
|
||||
if handler, exists := cmdHandlers[cmd]; exists {
|
||||
if err := handler(parts[1:]); err != nil {
|
||||
fmt.Printf("error: %v\n", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("error: unknown command %q\n", cmd)
|
||||
if err := repl.handleCommand(strings.Fields(line)); err != nil {
|
||||
fmt.Fprintf(repl.console, "error: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -251,6 +359,14 @@ If a wallet and endpoint is provided, the initial netmap data will be loaded fro
|
|||
},
|
||||
}
|
||||
|
||||
const (
|
||||
netmapConfigPath = "netmap-config"
|
||||
netmapConfigUsage = "Path to the netmap configuration file"
|
||||
)
|
||||
|
||||
func initContainerPolicyPlaygroundCmd() {
|
||||
commonflags.Init(policyPlaygroundCmd)
|
||||
policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage)
|
||||
|
||||
_ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath))
|
||||
}
|
||||
|
|
|
@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
|
|||
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
|
||||
leftMinutes := int(leftSeconds / 60)
|
||||
|
||||
sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
|
||||
fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes)
|
||||
}
|
||||
|
||||
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
|
||||
|
@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
|
|||
hour := int(duration.Seconds() / 3600)
|
||||
minute := int(duration.Seconds()/60) % 60
|
||||
second := int(duration.Seconds()) % 60
|
||||
sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
|
||||
fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second)
|
||||
}
|
||||
}
|
||||
|
||||
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
|
||||
if resp.GetBody().GetStartedAt() != nil {
|
||||
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
|
||||
sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
|
||||
fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339))
|
||||
}
|
||||
}
|
||||
|
||||
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
|
||||
if len(resp.GetBody().GetErrorMessage()) > 0 {
|
||||
sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
|
||||
fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
|
|||
default:
|
||||
status = "undefined"
|
||||
}
|
||||
sb.WriteString(fmt.Sprintf(" Status: %s.", status))
|
||||
fmt.Fprintf(sb, " Status: %s.", status)
|
||||
}
|
||||
|
||||
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
|
||||
|
@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
|
|||
}
|
||||
|
||||
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
|
||||
sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
|
||||
fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
|
||||
resp.GetBody().GetEvacuatedObjects(),
|
||||
resp.GetBody().GetTotalObjects(),
|
||||
resp.GetBody().GetFailedObjects(),
|
||||
resp.GetBody().GetSkippedObjects(),
|
||||
resp.GetBody().GetEvacuatedTrees(),
|
||||
resp.GetBody().GetTotalTrees(),
|
||||
resp.GetBody().GetFailedTrees()))
|
||||
resp.GetBody().GetFailedTrees())
|
||||
}
|
||||
|
||||
func initControlEvacuationShardCmd() {
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// object lock command.
|
||||
|
@ -78,7 +79,7 @@ var objectLockCmd = &cobra.Command{
|
|||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
|
||||
defer cancel()
|
||||
|
||||
endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
|
||||
endpoint := viper.GetString(commonflags.RPC)
|
||||
|
||||
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
|
||||
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)
|
||||
|
|
|
@ -48,6 +48,12 @@ type ecHeader struct {
|
|||
parent oid.ID
|
||||
}
|
||||
|
||||
type objectCounter struct {
|
||||
sync.Mutex
|
||||
total uint32
|
||||
isECcounted bool
|
||||
}
|
||||
|
||||
type objectPlacement struct {
|
||||
requiredNodes []netmapSDK.NodeInfo
|
||||
confirmedNodes []netmapSDK.NodeInfo
|
||||
|
@ -56,6 +62,7 @@ type objectPlacement struct {
|
|||
type objectNodesResult struct {
|
||||
errors []error
|
||||
placements map[oid.ID]objectPlacement
|
||||
total uint32
|
||||
}
|
||||
|
||||
type ObjNodesDataObject struct {
|
||||
|
@ -106,18 +113,18 @@ func objectNodes(cmd *cobra.Command, _ []string) {
|
|||
pk := key.GetOrGenerate(cmd)
|
||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||
|
||||
objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
|
||||
objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk)
|
||||
|
||||
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
|
||||
|
||||
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
|
||||
|
||||
getActualPlacement(cmd, netmap, pk, objects, result)
|
||||
getActualPlacement(cmd, netmap, pk, objects, count, result)
|
||||
|
||||
printPlacement(cmd, objID, objects, result)
|
||||
}
|
||||
|
||||
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
|
||||
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) {
|
||||
var addrObj oid.Address
|
||||
addrObj.SetContainer(cnrID)
|
||||
addrObj.SetObject(objID)
|
||||
|
@ -145,7 +152,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
|
|||
parent: res.Header().ECHeader().Parent(),
|
||||
}
|
||||
}
|
||||
return []phyObject{obj}
|
||||
return []phyObject{obj}, 1
|
||||
}
|
||||
|
||||
var errSplitInfo *objectSDK.SplitInfoError
|
||||
|
@ -155,29 +162,34 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
|
|||
|
||||
var ecInfoError *objectSDK.ECInfoError
|
||||
if errors.As(err, &ecInfoError) {
|
||||
return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
|
||||
return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1
|
||||
}
|
||||
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
|
||||
members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
|
||||
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
|
||||
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) {
|
||||
members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
|
||||
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total
|
||||
}
|
||||
|
||||
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
|
||||
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) {
|
||||
var total int
|
||||
splitInfo := errSplitInfo.SplitInfo()
|
||||
|
||||
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
|
||||
return members
|
||||
if total = len(members); total > 0 {
|
||||
total-- // linking object is not data object
|
||||
}
|
||||
return members, total
|
||||
}
|
||||
|
||||
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
|
||||
return members
|
||||
return members, len(members)
|
||||
}
|
||||
|
||||
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
|
||||
members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
|
||||
return members, len(members)
|
||||
}
|
||||
|
||||
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
|
||||
|
@ -383,8 +395,11 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
|
|||
}
|
||||
}
|
||||
|
||||
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
|
||||
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) {
|
||||
resultMtx := &sync.Mutex{}
|
||||
counter := &objectCounter{
|
||||
total: uint32(count),
|
||||
}
|
||||
|
||||
candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
|
||||
|
||||
|
@ -401,7 +416,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
|
|||
|
||||
for _, object := range objects {
|
||||
eg.Go(func() error {
|
||||
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
|
||||
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter)
|
||||
resultMtx.Lock()
|
||||
defer resultMtx.Unlock()
|
||||
if err == nil && stored {
|
||||
|
@ -420,6 +435,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
|
|||
}
|
||||
|
||||
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
|
||||
result.total = counter.total
|
||||
}
|
||||
|
||||
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
|
||||
|
@ -478,7 +494,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
|
|||
return cli, nil
|
||||
}
|
||||
|
||||
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
|
||||
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
|
||||
var addrObj oid.Address
|
||||
addrObj.SetContainer(cnrID)
|
||||
addrObj.SetObject(objID)
|
||||
|
@ -493,6 +509,14 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
|
|||
|
||||
res, err := internalclient.HeadObject(ctx, prmHead)
|
||||
if err == nil && res != nil {
|
||||
if res.Header().ECHeader() != nil {
|
||||
counter.Lock()
|
||||
defer counter.Unlock()
|
||||
if !counter.isECcounted {
|
||||
counter.total *= res.Header().ECHeader().Total()
|
||||
}
|
||||
counter.isECcounted = true
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
var notFound *apistatus.ObjectNotFound
|
||||
|
@ -512,7 +536,8 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
|
|||
}
|
||||
|
||||
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
|
||||
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
|
||||
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
|
||||
fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
|
||||
|
||||
for _, object := range objects {
|
||||
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
|
||||
|
|
|
@ -2,6 +2,7 @@ package object

import (
"fmt"
"os"
"strconv"
"strings"

@ -9,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@ -20,6 +22,7 @@ const (
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
splitHeaderFlagName = "split-header"
)

var objectPatchCmd = &cobra.Command{

@ -50,6 +53,7 @@ func initObjectPatchCmd() {
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
}

func patch(cmd *cobra.Command, _ []string) {

@ -84,6 +88,8 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs

prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)

for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],

@ -147,3 +153,22 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}

func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
path, _ := cmd.Flags().GetString(splitHeaderFlagName)
if path == "" {
return nil
}

data, err := os.ReadFile(path)
commonCmd.ExitOnErr(cmd, "read file error: %w", err)

splitHdrV2 := new(objectV2.SplitHeader)
err = splitHdrV2.Unmarshal(data)
if err != nil {
err = splitHdrV2.UnmarshalJSON(data)
commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
}

return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
}
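Each --range value documented above uses the offset:length format, paired positionally with a --payload file. The CLI's own range parser is not part of this hunk; a minimal sketch of parsing that format, shown only to illustrate the flag contract, could look like:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange splits an "offset:length" flag value into its two numbers.
// Illustration of the documented format only; the actual frostfs-cli
// parser lives outside this hunk.
func parseRange(s string) (offset, length uint64, err error) {
	before, after, found := strings.Cut(s, ":")
	if !found {
		return 0, 0, fmt.Errorf("invalid range %q, expected offset:length", s)
	}
	if offset, err = strconv.ParseUint(before, 10, 64); err != nil {
		return 0, 0, err
	}
	if length, err = strconv.ParseUint(after, 10, 64); err != nil {
		return 0, 0, err
	}
	return offset, length, nil
}

func main() {
	off, ln, err := parseRange("1024:256")
	fmt.Println(off, ln, err) // 1024 256 <nil>
}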
@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool {
if ok {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
- if !(toJSON || toProto) {
+ if !toJSON && !toProto {
cmd.PrintErrln("Object is erasure-encoded, ec information received.")
}
printECInfo(cmd, errECInfo.ECInfo())
@ -2,17 +2,19 @@ package tree

import (
"context"
"crypto/tls"
"fmt"
"strings"

"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)

@ -31,22 +33,29 @@ func _client() (tree.TreeServiceClient, error) {
return nil, err
}

host, isTLS, err := client.ParseURI(netAddr.URIAddr())
if err != nil {
return nil, err
}

creds := insecure.NewCredentials()
if isTLS {
creds = credentials.NewTLS(&tls.Config{})
}

opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
- tracing.NewUnaryClientInteceptor(),
+ tracing.NewUnaryClientInterceptor(),
),
grpc.WithChainStreamInterceptor(
tracing.NewStreamClientInterceptor(),
),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
grpc.WithTransportCredentials(creds),
}

- if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
- opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }

- cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
+ cc, err := grpc.NewClient(host, opts...)
return tree.NewTreeServiceClient(cc), err
}
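The tree client now chooses transport credentials from the URI scheme reported by client.ParseURI instead of a string-prefix check, so exactly one credential is ever attached. A hedged sketch of that caller-side decision (the endpoint flag and isTLS value are illustrative, not taken from this repository's configs):

package main

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Illustrative: a grpcs:// endpoint would flip isTLS to true.
	isTLS := false

	creds := insecure.NewCredentials()
	if isTLS {
		creds = credentials.NewTLS(&tls.Config{})
	}
	fmt.Println(creds.Info().SecurityProtocol) // "insecure" for plain gRPC
}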
@ -4,11 +4,14 @@ import (
"context"
"os"
"os/signal"
"strconv"
"syscall"

configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/cast"
"github.com/spf13/viper"
"go.uber.org/zap"
)

@ -38,13 +41,33 @@ func reloadConfig() error {
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled"))
var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
err = logPrm.SetTags(loggerTags())
if err != nil {
return err
}
logger.UpdateLevelForTags(logPrm)

- return logPrm.Reload()
+ return nil
}

func loggerTags() [][]string {
var res [][]string
for i := 0; ; i++ {
var item []string
index := strconv.FormatInt(int64(i), 10)
names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
if names == "" {
break
}
item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
res = append(res, item)
}
return res
}

func watchForSignal(ctx context.Context, cancel func()) {
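loggerTags collects (names, level) pairs from consecutive logger.tags.<i>.* keys and stops at the first index without a names entry; the result is then fed to logPrm.SetTags and applied via logger.UpdateLevelForTags. A tiny sketch of the shape it produces for one hypothetical entry (example key values, not taken from any shipped config):

package main

import "fmt"

func main() {
	// Illustrative result of loggerTags() for a config with
	//   logger.tags.0.names = "policer,tree"
	//   logger.tags.0.level = "debug"
	tags := [][]string{{"policer,tree", "debug"}}
	fmt.Println(tags)
}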
@ -31,7 +31,6 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
- logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent

@ -70,6 +69,7 @@ func main() {

metrics := irMetrics.NewInnerRingMetrics()

var logPrm logger.Prm
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)

@ -80,10 +80,14 @@ func main() {
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
err = logPrm.SetTags(loggerTags())
exitErr(err)

log, err = logger.NewLogger(logPrm)
exitErr(err)

logger.UpdateLevelForTags(logPrm)

ctx, cancel := context.WithCancel(context.Background())

pprofCmp = newPprofComponent()
@ -3,6 +3,8 @@ package common

import (
"errors"
"fmt"

"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)

type FilterResult byte

@ -71,11 +73,7 @@ func (fp FallbackParser) ToParser() Parser {
func (p Parser) ToFallbackParser() FallbackParser {
return func(key, value []byte) (SchemaEntry, Parser) {
entry, next, err := p(key, value)
- if err != nil {
- panic(fmt.Errorf(
- "couldn't use that parser as a fallback parser, it returned an error: %w", err,
- ))
- }
+ assert.NoError(err, "couldn't use that parser as a fallback parser")
return entry, next
}
}
@ -53,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
f.historyPointer++
// Stop iterating over history.
if f.historyPointer == len(f.history) {
- f.InputField.SetText(f.currentContent)
+ f.SetText(f.currentContent)
return
}
- f.InputField.SetText(f.history[f.historyPointer])
+ f.SetText(f.history[f.historyPointer])
case tcell.KeyUp:
if len(f.history) == 0 {
return
}
// Start iterating over history.
if f.historyPointer == len(f.history) {
- f.currentContent = f.InputField.GetText()
+ f.currentContent = f.GetText()
}
// End of history.
if f.historyPointer == 0 {

@ -71,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
}
// Iterate to least recent prompts.
f.historyPointer--
- f.InputField.SetText(f.history[f.historyPointer])
+ f.SetText(f.history[f.historyPointer])
default:
f.InputField.InputHandler()(event, func(tview.Primitive) {})
}
@ -8,6 +8,7 @@ import (
"sync"

"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)

@ -94,9 +95,7 @@ func (v *RecordsView) Mount(ctx context.Context) error {
}

func (v *RecordsView) Unmount() {
- if v.onUnmount == nil {
- panic("try to unmount not mounted component")
- }
+ assert.False(v.onUnmount == nil, "try to unmount not mounted component")
v.onUnmount()
v.onUnmount = nil
}
@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
}

- ui.Box.MouseHandler()
+ ui.MouseHandler()
}

func (ui *UI) WithPrompt(prompt string) error {
@ -14,7 +14,7 @@ import (
func initAPEManagerService(c *cfg) {
contractStorage := ape_contract.NewProxyVerificationContractStorage(
morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
- c.shared.key,
+ c.key,
c.cfgMorph.proxyScriptHash,
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
@ -1,20 +1,27 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"slices"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
"github.com/hashicorp/golang-lru/v2/expirable"
|
||||
"github.com/hashicorp/golang-lru/v2/simplelru"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
|
||||
|
@ -110,55 +117,6 @@ func (c *ttlNetCache[K, V]) remove(key K) {
|
|||
hit = c.cache.Remove(key)
|
||||
}
|
||||
|
||||
// entity that provides LRU cache interface.
|
||||
type lruNetCache struct {
|
||||
cache *lru.Cache[uint64, *netmapSDK.NetMap]
|
||||
|
||||
netRdr netValueReader[uint64, *netmapSDK.NetMap]
|
||||
|
||||
metrics cacheMetrics
|
||||
}
|
||||
|
||||
// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
|
||||
func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
|
||||
cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
|
||||
fatalOnErr(err)
|
||||
|
||||
return &lruNetCache{
|
||||
cache: cache,
|
||||
netRdr: netRdr,
|
||||
metrics: metrics,
|
||||
}
|
||||
}
|
||||
|
||||
// reads value by the key.
|
||||
//
|
||||
// updates the value from the network on cache miss.
|
||||
//
|
||||
// returned value should not be modified.
|
||||
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
|
||||
hit := false
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
|
||||
}()
|
||||
|
||||
val, ok := c.cache.Get(key)
|
||||
if ok {
|
||||
hit = true
|
||||
return val, nil
|
||||
}
|
||||
|
||||
val, err := c.netRdr(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.cache.Add(key, val)
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// wrapper over TTL cache of values read from the network
|
||||
// that implements container storage.
|
||||
type ttlContainerStorage struct {
|
||||
|
@ -200,20 +158,236 @@ func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*con
|
|||
type lruNetmapSource struct {
|
||||
netState netmap.State
|
||||
|
||||
cache *lruNetCache
|
||||
client rawSource
|
||||
cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
|
||||
mtx sync.RWMutex
|
||||
metrics cacheMetrics
|
||||
log *logger.Logger
|
||||
candidates atomic.Pointer[[]netmapSDK.NodeInfo]
|
||||
}
|
||||
|
||||
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
|
||||
type rawSource interface {
|
||||
GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
|
||||
GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
|
||||
}
|
||||
|
||||
func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
|
||||
netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
|
||||
) netmap.Source {
|
||||
const netmapCacheSize = 10
|
||||
|
||||
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
|
||||
return v.GetNetMapByEpoch(ctx, key)
|
||||
}, metrics.NewCacheMetrics("netmap"))
|
||||
cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
|
||||
fatalOnErr(err)
|
||||
|
||||
return &lruNetmapSource{
|
||||
netState: s,
|
||||
cache: lruNetmapCache,
|
||||
src := &lruNetmapSource{
|
||||
netState: netState,
|
||||
client: client,
|
||||
cache: cache,
|
||||
log: log,
|
||||
metrics: metrics.NewCacheMetrics("netmap"),
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
src.updateCandidates(ctx, d)
|
||||
}()
|
||||
|
||||
return src
|
||||
}
|
||||
|
||||
// updateCandidates routine to merge netmap in cache with candidates list.
|
||||
func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
|
||||
timer := time.NewTimer(d)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-timer.C:
|
||||
newCandidates, err := s.client.GetCandidates(ctx)
|
||||
if err != nil {
|
||||
s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
|
||||
timer.Reset(d)
|
||||
break
|
||||
}
|
||||
if len(newCandidates) == 0 {
|
||||
s.candidates.Store(&newCandidates)
|
||||
timer.Reset(d)
|
||||
break
|
||||
}
|
||||
slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
|
||||
return cmp.Compare(n1.Hash(), n2.Hash())
|
||||
})
|
||||
|
||||
// Check once state changed
|
||||
v := s.candidates.Load()
|
||||
if v == nil {
|
||||
s.candidates.Store(&newCandidates)
|
||||
s.mergeCacheWithCandidates(newCandidates)
|
||||
timer.Reset(d)
|
||||
break
|
||||
}
|
||||
ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
|
||||
if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
|
||||
uint32(n1.Status()) != uint32(n2.Status()) ||
|
||||
slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
|
||||
return 1
|
||||
}
|
||||
var ne1 []string
|
||||
n1.IterateNetworkEndpoints(func(s string) bool {
|
||||
ne1 = append(ne1, s)
|
||||
return false
|
||||
})
|
||||
var ne2 []string
|
||||
n2.IterateNetworkEndpoints(func(s string) bool {
|
||||
ne2 = append(ne2, s)
|
||||
return false
|
||||
})
|
||||
return slices.Compare(ne1, ne2)
|
||||
})
|
||||
if ret != 0 {
|
||||
s.candidates.Store(&newCandidates)
|
||||
s.mergeCacheWithCandidates(newCandidates)
|
||||
}
|
||||
timer.Reset(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
|
||||
s.mtx.Lock()
|
||||
tmp := s.cache.Values()
|
||||
s.mtx.Unlock()
|
||||
for _, pointer := range tmp {
|
||||
nm := pointer.Load()
|
||||
updates := getNetMapNodesToUpdate(nm, candidates)
|
||||
if len(updates) > 0 {
|
||||
nm = nm.Clone()
|
||||
mergeNetmapWithCandidates(updates, nm)
|
||||
pointer.Store(nm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reads value by the key.
|
||||
//
|
||||
// updates the value from the network on cache miss.
|
||||
//
|
||||
// returned value should not be modified.
|
||||
func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
|
||||
hit := false
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
|
||||
}()
|
||||
|
||||
s.mtx.RLock()
|
||||
val, ok := s.cache.Get(key)
|
||||
s.mtx.RUnlock()
|
||||
if ok {
|
||||
hit = true
|
||||
return val.Load(), nil
|
||||
}
|
||||
|
||||
s.mtx.Lock()
|
||||
defer s.mtx.Unlock()
|
||||
|
||||
val, ok = s.cache.Get(key)
|
||||
if ok {
|
||||
hit = true
|
||||
return val.Load(), nil
|
||||
}
|
||||
|
||||
nm, err := s.client.GetNetMapByEpoch(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v := s.candidates.Load()
|
||||
if v != nil {
|
||||
updates := getNetMapNodesToUpdate(nm, *v)
|
||||
if len(updates) > 0 {
|
||||
mergeNetmapWithCandidates(updates, nm)
|
||||
}
|
||||
}
|
||||
|
||||
p := atomic.Pointer[netmapSDK.NetMap]{}
|
||||
p.Store(nm)
|
||||
s.cache.Add(key, &p)
|
||||
|
||||
return nm, nil
|
||||
}
|
||||
|
||||
// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
|
||||
func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
|
||||
for _, v := range updates {
|
||||
if v.status != netmapSDK.UnspecifiedState {
|
||||
nm.Nodes()[v.netmapIndex].SetStatus(v.status)
|
||||
}
|
||||
if v.externalAddresses != nil {
|
||||
nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
|
||||
}
|
||||
if v.endpoints != nil {
|
||||
nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type nodeToUpdate struct {
|
||||
netmapIndex int
|
||||
status netmapSDK.NodeState
|
||||
externalAddresses []string
|
||||
endpoints []string
|
||||
}
|
||||
|
||||
// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
|
||||
func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
|
||||
var res []nodeToUpdate
|
||||
for i := range nm.Nodes() {
|
||||
for _, cnd := range candidates {
|
||||
if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
|
||||
var tmp nodeToUpdate
|
||||
var update bool
|
||||
|
||||
if cnd.Status() != nm.Nodes()[i].Status() &&
|
||||
(cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
|
||||
update = true
|
||||
tmp.status = cnd.Status()
|
||||
}
|
||||
|
||||
externalAddresses := cnd.ExternalAddresses()
|
||||
if externalAddresses != nil &&
|
||||
slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
|
||||
update = true
|
||||
tmp.externalAddresses = externalAddresses
|
||||
}
|
||||
|
||||
nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
|
||||
nm.Nodes()[i].IterateNetworkEndpoints(func(s string) bool {
|
||||
nodeEndpoints = append(nodeEndpoints, s)
|
||||
return false
|
||||
})
|
||||
candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
|
||||
cnd.IterateNetworkEndpoints(func(s string) bool {
|
||||
candidateEndpoints = append(candidateEndpoints, s)
|
||||
return false
|
||||
})
|
||||
if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
|
||||
update = true
|
||||
tmp.endpoints = candidateEndpoints
|
||||
}
|
||||
|
||||
if update {
|
||||
tmp.netmapIndex = i
|
||||
res = append(res, tmp)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
|
||||
|
@ -225,7 +399,7 @@ func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*
|
|||
}
|
||||
|
||||
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
|
||||
val, err := s.cache.get(ctx, epoch)
|
||||
val, err := s.get(ctx, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -3,9 +3,11 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -59,3 +61,75 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) {
|
|||
type noopCacheMetricts struct{}
|
||||
|
||||
func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
|
||||
|
||||
type rawSrc struct{}
|
||||
|
||||
func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
|
||||
node0 := netmapSDK.NodeInfo{}
|
||||
node0.SetPublicKey([]byte{byte(1)})
|
||||
node0.SetStatus(netmapSDK.Online)
|
||||
node0.SetExternalAddresses("1", "0")
|
||||
node0.SetNetworkEndpoints("1", "0")
|
||||
|
||||
node1 := netmapSDK.NodeInfo{}
|
||||
node1.SetPublicKey([]byte{byte(1)})
|
||||
node1.SetStatus(netmapSDK.Online)
|
||||
node1.SetExternalAddresses("1", "0")
|
||||
node1.SetNetworkEndpoints("1", "0")
|
||||
|
||||
return []netmapSDK.NodeInfo{node0, node1}, nil
|
||||
}
|
||||
|
||||
func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
|
||||
nm := netmapSDK.NetMap{}
|
||||
nm.SetEpoch(1)
|
||||
|
||||
node0 := netmapSDK.NodeInfo{}
|
||||
node0.SetPublicKey([]byte{byte(1)})
|
||||
node0.SetStatus(netmapSDK.Maintenance)
|
||||
node0.SetExternalAddresses("0")
|
||||
node0.SetNetworkEndpoints("0")
|
||||
|
||||
node1 := netmapSDK.NodeInfo{}
|
||||
node1.SetPublicKey([]byte{byte(1)})
|
||||
node1.SetStatus(netmapSDK.Maintenance)
|
||||
node1.SetExternalAddresses("0")
|
||||
node1.SetNetworkEndpoints("0")
|
||||
|
||||
nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
|
||||
|
||||
return &nm, nil
|
||||
}
|
||||
|
||||
type st struct{}
|
||||
|
||||
func (s *st) CurrentEpoch() uint64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func TestNetmapStorage(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := sync.WaitGroup{}
|
||||
cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
|
||||
|
||||
nm, err := cache.GetNetMapByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
|
||||
require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
|
||||
require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
nm, err := cache.GetNetMapByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
for _, node := range nm.Nodes() {
|
||||
if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
|
||||
node.NumberOfNetworkEndpoints() == 2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, time.Second*5, time.Millisecond*10)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}
|
||||
|
|
|
@ -108,6 +108,8 @@ type applicationConfiguration struct {
|
|||
level string
|
||||
destination string
|
||||
timestamp bool
|
||||
options []zap.Option
|
||||
tags [][]string
|
||||
}
|
||||
|
||||
ObjectCfg struct {
|
||||
|
@ -232,6 +234,15 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
|
|||
a.LoggerCfg.level = loggerconfig.Level(c)
|
||||
a.LoggerCfg.destination = loggerconfig.Destination(c)
|
||||
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
|
||||
var opts []zap.Option
|
||||
if loggerconfig.ToLokiConfig(c).Enabled {
|
||||
opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
|
||||
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
|
||||
return lokiCore
|
||||
})}
|
||||
}
|
||||
a.LoggerCfg.options = opts
|
||||
a.LoggerCfg.tags = loggerconfig.Tags(c)
|
||||
|
||||
// Object
|
||||
|
||||
|
@ -374,14 +385,11 @@ func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardco
|
|||
}
|
||||
|
||||
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
|
||||
limitsConfig := source.Limits()
|
||||
limitsConfig := source.Limits().ToConfig()
|
||||
limiter, err := qos.NewLimiter(limitsConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if target.limiter != nil {
|
||||
target.limiter.Close()
|
||||
}
|
||||
target.limiter = limiter
|
||||
return nil
|
||||
}
|
||||
|
@ -473,7 +481,6 @@ type shared struct {
|
|||
// dynamicConfiguration stores parameters of the
|
||||
// components that supports runtime reconfigurations.
|
||||
type dynamicConfiguration struct {
|
||||
logger *logger.Prm
|
||||
pprof *httpComponent
|
||||
metrics *httpComponent
|
||||
}
|
||||
|
@ -714,16 +721,12 @@ func initCfg(appCfg *config.Config) *cfg {
|
|||
|
||||
netState.metrics = c.metricsCollector
|
||||
|
||||
logPrm := c.loggerPrm()
|
||||
logPrm, err := c.loggerPrm()
|
||||
fatalOnErr(err)
|
||||
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
|
||||
log, err := logger.NewLogger(logPrm)
|
||||
fatalOnErr(err)
|
||||
if loggerconfig.ToLokiConfig(appCfg).Enabled {
|
||||
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
|
||||
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
|
||||
return lokiCore
|
||||
}))
|
||||
}
|
||||
logger.UpdateLevelForTags(logPrm)
|
||||
|
||||
c.internals = initInternals(appCfg, log)
|
||||
|
||||
|
@ -1076,26 +1079,28 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
|
|||
return sh
|
||||
}
|
||||
|
||||
func (c *cfg) loggerPrm() *logger.Prm {
|
||||
// check if it has been inited before
|
||||
if c.dynamicConfiguration.logger == nil {
|
||||
c.dynamicConfiguration.logger = new(logger.Prm)
|
||||
}
|
||||
|
||||
func (c *cfg) loggerPrm() (logger.Prm, error) {
|
||||
var prm logger.Prm
|
||||
// (re)init read configuration
|
||||
err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
|
||||
err := prm.SetLevelString(c.LoggerCfg.level)
|
||||
if err != nil {
|
||||
// not expected since validation should be performed before
|
||||
panic("incorrect log level format: " + c.LoggerCfg.level)
|
||||
return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
|
||||
}
|
||||
err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
|
||||
err = prm.SetDestination(c.LoggerCfg.destination)
|
||||
if err != nil {
|
||||
// not expected since validation should be performed before
|
||||
panic("incorrect log destination format: " + c.LoggerCfg.destination)
|
||||
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
|
||||
}
|
||||
prm.PrependTimestamp = c.LoggerCfg.timestamp
|
||||
prm.Options = c.LoggerCfg.options
|
||||
err = prm.SetTags(c.LoggerCfg.tags)
|
||||
if err != nil {
|
||||
// not expected since validation should be performed before
|
||||
return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
|
||||
}
|
||||
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
|
||||
|
||||
return c.dynamicConfiguration.logger
|
||||
return prm, nil
|
||||
}
|
||||
|
||||
func (c *cfg) LocalAddress() network.AddressGroup {
|
||||
|
@ -1335,11 +1340,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
|
|||
// all the components are expected to support
|
||||
// Logger's dynamic reconfiguration approach
|
||||
|
||||
// Logger
|
||||
|
||||
logPrm := c.loggerPrm()
|
||||
|
||||
components := c.getComponents(ctx, logPrm)
|
||||
components := c.getComponents(ctx)
|
||||
|
||||
// Object
|
||||
c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
|
||||
|
@ -1377,10 +1378,17 @@ func (c *cfg) reloadConfig(ctx context.Context) {
|
|||
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
|
||||
}
|
||||
|
||||
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
|
||||
func (c *cfg) getComponents(ctx context.Context) []dCmp {
|
||||
var components []dCmp
|
||||
|
||||
components = append(components, dCmp{"logger", logPrm.Reload})
|
||||
components = append(components, dCmp{"logger", func() error {
|
||||
prm, err := c.loggerPrm()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.UpdateLevelForTags(prm)
|
||||
return nil
|
||||
}})
|
||||
components = append(components, dCmp{"runtime", func() error {
|
||||
setRuntimeParameters(ctx, c)
|
||||
return nil
|
||||
|
|
|
@ -11,10 +11,10 @@ import (
|
|||
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
|
||||
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
|
||||
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
|
||||
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
|
||||
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
|
||||
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
|
||||
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -135,8 +135,8 @@ func TestEngineSection(t *testing.T) {
|
|||
require.Equal(t, mode.ReadOnly, sc.Mode())
|
||||
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
|
||||
|
||||
readLimits := limits.Read()
|
||||
writeLimits := limits.Write()
|
||||
readLimits := limits.ToConfig().Read
|
||||
writeLimits := limits.ToConfig().Write
|
||||
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
|
||||
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
|
||||
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
|
||||
|
@ -144,7 +144,7 @@ func TestEngineSection(t *testing.T) {
|
|||
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
|
||||
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
|
||||
require.ElementsMatch(t, readLimits.Tags,
|
||||
[]limitsconfig.IOTagConfig{
|
||||
[]qos.IOTagConfig{
|
||||
{
|
||||
Tag: "internal",
|
||||
Weight: toPtr(20),
|
||||
|
@ -168,13 +168,19 @@ func TestEngineSection(t *testing.T) {
|
|||
LimitOps: toPtr(25000),
|
||||
},
|
||||
{
|
||||
Tag: "policer",
|
||||
Tag: "policer",
|
||||
Weight: toPtr(5),
|
||||
LimitOps: toPtr(25000),
|
||||
Prohibited: true,
|
||||
},
|
||||
{
|
||||
Tag: "treesync",
|
||||
Weight: toPtr(5),
|
||||
LimitOps: toPtr(25000),
|
||||
LimitOps: toPtr(25),
|
||||
},
|
||||
})
|
||||
require.ElementsMatch(t, writeLimits.Tags,
|
||||
[]limitsconfig.IOTagConfig{
|
||||
[]qos.IOTagConfig{
|
||||
{
|
||||
Tag: "internal",
|
||||
Weight: toPtr(200),
|
||||
|
@ -202,6 +208,11 @@ func TestEngineSection(t *testing.T) {
|
|||
Weight: toPtr(50),
|
||||
LimitOps: toPtr(2500),
|
||||
},
|
||||
{
|
||||
Tag: "treesync",
|
||||
Weight: toPtr(50),
|
||||
LimitOps: toPtr(100),
|
||||
},
|
||||
})
|
||||
case 1:
|
||||
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
|
||||
|
@ -258,14 +269,14 @@ func TestEngineSection(t *testing.T) {
|
|||
require.Equal(t, mode.ReadWrite, sc.Mode())
|
||||
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
|
||||
|
||||
readLimits := limits.Read()
|
||||
writeLimits := limits.Write()
|
||||
require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
|
||||
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
|
||||
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
|
||||
require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
|
||||
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
|
||||
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
|
||||
readLimits := limits.ToConfig().Read
|
||||
writeLimits := limits.ToConfig().Write
|
||||
require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
|
||||
require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
|
||||
require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
|
||||
require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
|
||||
require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
|
||||
require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
|
||||
require.Equal(t, 0, len(readLimits.Tags))
|
||||
require.Equal(t, 0, len(writeLimits.Tags))
|
||||
}
|
||||
|
|
|
@ -1,19 +1,13 @@
|
|||
package limits
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"github.com/spf13/cast"
|
||||
)
|
||||
|
||||
const (
|
||||
NoLimit int64 = math.MaxInt64
|
||||
DefaultIdleTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// From wraps config section into Config.
|
||||
func From(c *config.Config) *Config {
|
||||
return (*Config)(c)
|
||||
|
@ -23,36 +17,43 @@ func From(c *config.Config) *Config {
|
|||
// which provides access to Shard's limits configurations.
|
||||
type Config config.Config
|
||||
|
||||
// Read returns the value of "read" limits config section.
|
||||
func (x *Config) Read() OpConfig {
|
||||
func (x *Config) ToConfig() qos.LimiterConfig {
|
||||
result := qos.LimiterConfig{
|
||||
Read: x.read(),
|
||||
Write: x.write(),
|
||||
}
|
||||
panicOnErr(result.Validate())
|
||||
return result
|
||||
}
|
||||
|
||||
func (x *Config) read() qos.OpConfig {
|
||||
return x.parse("read")
|
||||
}
|
||||
|
||||
// Write returns the value of "write" limits config section.
|
||||
func (x *Config) Write() OpConfig {
|
||||
func (x *Config) write() qos.OpConfig {
|
||||
return x.parse("write")
|
||||
}
|
||||
|
||||
func (x *Config) parse(sub string) OpConfig {
|
||||
func (x *Config) parse(sub string) qos.OpConfig {
|
||||
c := (*config.Config)(x).Sub(sub)
|
||||
var result OpConfig
|
||||
var result qos.OpConfig
|
||||
|
||||
if s := config.Int(c, "max_waiting_ops"); s > 0 {
|
||||
result.MaxWaitingOps = s
|
||||
} else {
|
||||
result.MaxWaitingOps = NoLimit
|
||||
result.MaxWaitingOps = qos.NoLimit
|
||||
}
|
||||
|
||||
if s := config.Int(c, "max_running_ops"); s > 0 {
|
||||
result.MaxRunningOps = s
|
||||
} else {
|
||||
result.MaxRunningOps = NoLimit
|
||||
result.MaxRunningOps = qos.NoLimit
|
||||
}
|
||||
|
||||
if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
|
||||
result.IdleTimeout = s
|
||||
} else {
|
||||
result.IdleTimeout = DefaultIdleTimeout
|
||||
result.IdleTimeout = qos.DefaultIdleTimeout
|
||||
}
|
||||
|
||||
result.Tags = tags(c)
|
||||
|
@ -60,42 +61,16 @@ func (x *Config) parse(sub string) OpConfig {
|
|||
return result
|
||||
}
|
||||
|
||||
type OpConfig struct {
|
||||
// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
|
||||
//
|
||||
// Equals NoLimit if the value is not a positive number.
|
||||
MaxWaitingOps int64
|
||||
// MaxRunningOps returns the value of "max_running_ops" config parameter.
|
||||
//
|
||||
// Equals NoLimit if the value is not a positive number.
|
||||
MaxRunningOps int64
|
||||
// IdleTimeout returns the value of "idle_timeout" config parameter.
|
||||
//
|
||||
// Equals DefaultIdleTimeout if the value is not a valid duration.
|
||||
IdleTimeout time.Duration
|
||||
// Tags returns the value of "tags" config parameter.
|
||||
//
|
||||
// Equals nil if the value is not a valid tags config slice.
|
||||
Tags []IOTagConfig
|
||||
}
|
||||
|
||||
type IOTagConfig struct {
|
||||
Tag string
|
||||
Weight *float64
|
||||
LimitOps *float64
|
||||
ReservedOps *float64
|
||||
}
|
||||
|
||||
func tags(c *config.Config) []IOTagConfig {
|
||||
func tags(c *config.Config) []qos.IOTagConfig {
|
||||
c = c.Sub("tags")
|
||||
var result []IOTagConfig
|
||||
var result []qos.IOTagConfig
|
||||
for i := 0; ; i++ {
|
||||
tag := config.String(c, strconv.Itoa(i)+".tag")
|
||||
if tag == "" {
|
||||
return result
|
||||
}
|
||||
|
||||
var tagConfig IOTagConfig
|
||||
var tagConfig qos.IOTagConfig
|
||||
tagConfig.Tag = tag
|
||||
|
||||
v := c.Value(strconv.Itoa(i) + ".weight")
|
||||
|
@ -119,6 +94,13 @@ func tags(c *config.Config) []IOTagConfig {
|
|||
tagConfig.ReservedOps = &r
|
||||
}
|
||||
|
||||
v = c.Value(strconv.Itoa(i) + ".prohibited")
|
||||
if v != nil {
|
||||
r, err := cast.ToBoolE(v)
|
||||
panicOnErr(err)
|
||||
tagConfig.Prohibited = r
|
||||
}
|
||||
|
||||
result = append(result, tagConfig)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package loggerconfig

import (
"os"
"strconv"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"

@ -60,6 +61,21 @@ func Timestamp(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "timestamp")
}

// Tags returns the value of "tags" config parameter from "logger" section.
func Tags(c *config.Config) [][]string {
var res [][]string
sub := c.Sub(subsection).Sub("tags")
for i := 0; ; i++ {
s := sub.Sub(strconv.FormatInt(int64(i), 10))
names := config.StringSafe(s, "names")
if names == "" {
break
}
res = append(res, []string{names, config.StringSafe(s, "level")})
}
return res
}

// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
hostname, _ := os.Hostname()
@ -33,6 +33,9 @@ const (

// ContainerCacheSizeDefault represents the default size for the container cache.
ContainerCacheSizeDefault = 100

// PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
PollCandidatesTimeoutDefault = 20 * time.Second
)

var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")

@ -154,3 +157,17 @@ func FrostfsIDCacheSize(c *config.Config) uint32 {
}
return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
}

// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
// from "morph" section.
//
// Returns PollCandidatesTimeoutDefault if the value is not positive duration.
func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
v := config.DurationSafe(c.Sub(subsection).
Sub("netmap").Sub("candidates"), "poll_interval")
if v > 0 {
return v
}

return PollCandidatesTimeoutDefault
}
@ -31,12 +31,11 @@ func Limits(c *config.Config) []LimitConfig {
break
}

- maxOps := config.IntSafe(sc, "max_ops")
- if maxOps == 0 {
+ if sc.Value("max_ops") == nil {
panic("no max operations for method group")
}

- limits = append(limits, LimitConfig{methods, maxOps})
+ limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
}

return limits

@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) {
})

t.Run("no max operations", func(t *testing.T) {
- const path = "testdata/node"
+ const path = "testdata/no_max_ops"

fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) })

@ -50,4 +50,28 @@ func TestRPCSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})

t.Run("zero max operations", func(t *testing.T) {
const path = "testdata/zero_max_ops"

fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)

limit0 := limits[0]
limit1 := limits[1]

require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(0))

require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}

configtest.ForEachFileType(path, fileConfigTest)

t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
}
cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env (new file, 4 lines)

@ -0,0 +1,4 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=0
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json (new file, 19 lines)

@ -0,0 +1,19 @@
{
  "rpc": {
    "limits": [
      {
        "methods": [
          "/neo.fs.v2.object.ObjectService/PutSingle",
          "/neo.fs.v2.object.ObjectService/Put"
        ],
        "max_ops": 0
      },
      {
        "methods": [
          "/neo.fs.v2.object.ObjectService/Get"
        ],
        "max_ops": 10000
      }
    ]
  }
}

cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml (new file, 9 lines)

@ -0,0 +1,9 @@
rpc:
  limits:
    - methods:
        - /neo.fs.v2.object.ObjectService/PutSingle
        - /neo.fs.v2.object.ObjectService/Put
      max_ops: 0
    - methods:
        - /neo.fs.v2.object.ObjectService/Get
      max_ops: 10000
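The test data above exercises the changed rule: an explicit max_ops: 0 is now a valid, fully throttled limit, while an absent max_ops still panics, so the parser has to check for the presence of the key rather than for a zero value. A small standalone illustration of that distinction (the map-based getter below only mimics the config package used above):

package main

import "fmt"

// intSafe mimics a lenient config getter: a missing key and an explicit 0
// both come back as 0, which is why the old value-based check was ambiguous.
func intSafe(cfg map[string]int64, key string) int64 {
	return cfg[key] // zero value when absent
}

func main() {
	withZero := map[string]int64{"max_ops": 0}
	withoutKey := map[string]int64{}

	fmt.Println(intSafe(withZero, "max_ops"), intSafe(withoutKey, "max_ops")) // 0 0

	// Presence of the key, not its value, decides whether to reject the section.
	if _, ok := withoutKey["max_ops"]; !ok {
		fmt.Println("no max operations for method group")
	}
}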
@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) {
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
fatalOnErr(err)

- c.shared.cnrClient = wrap
+ c.cnrClient = wrap

cnrSrc := cntClient.AsContainerSource(wrap)

@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}

- c.shared.frostfsidClient = frostfsIDSubjectProvider
+ c.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)

defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(

@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) {
service := containerService.NewSignService(
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
- newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
+ newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
@ -8,38 +8,38 @@ import (
|
|||
func metricsComponent(c *cfg) (*httpComponent, bool) {
|
||||
var updated bool
|
||||
// check if it has been inited before
|
||||
if c.dynamicConfiguration.metrics == nil {
|
||||
c.dynamicConfiguration.metrics = new(httpComponent)
|
||||
c.dynamicConfiguration.metrics.cfg = c
|
||||
c.dynamicConfiguration.metrics.name = "metrics"
|
||||
c.dynamicConfiguration.metrics.handler = metrics.Handler()
|
||||
if c.metrics == nil {
|
||||
c.metrics = new(httpComponent)
|
||||
c.metrics.cfg = c
|
||||
c.metrics.name = "metrics"
|
||||
c.metrics.handler = metrics.Handler()
|
||||
updated = true
|
||||
}
|
||||
|
||||
// (re)init read configuration
|
||||
enabled := metricsconfig.Enabled(c.appCfg)
|
||||
if enabled != c.dynamicConfiguration.metrics.enabled {
|
||||
c.dynamicConfiguration.metrics.enabled = enabled
|
||||
if enabled != c.metrics.enabled {
|
||||
c.metrics.enabled = enabled
|
||||
updated = true
|
||||
}
|
||||
address := metricsconfig.Address(c.appCfg)
|
||||
if address != c.dynamicConfiguration.metrics.address {
|
||||
c.dynamicConfiguration.metrics.address = address
|
||||
if address != c.metrics.address {
|
||||
c.metrics.address = address
|
||||
updated = true
|
||||
}
|
||||
dur := metricsconfig.ShutdownTimeout(c.appCfg)
|
||||
if dur != c.dynamicConfiguration.metrics.shutdownDur {
|
||||
c.dynamicConfiguration.metrics.shutdownDur = dur
|
||||
if dur != c.metrics.shutdownDur {
|
||||
c.metrics.shutdownDur = dur
|
||||
updated = true
|
||||
}
|
||||
|
||||
return c.dynamicConfiguration.metrics, updated
|
||||
return c.metrics, updated
|
||||
}
|
||||
|
||||
func enableMetricsSvc(c *cfg) {
|
||||
c.shared.metricsSvc.Enable()
|
||||
c.metricsSvc.Enable()
|
||||
}
|
||||
|
||||
func disableMetricsSvc(c *cfg) {
|
||||
c.shared.metricsSvc.Disable()
|
||||
c.metricsSvc.Disable()
|
||||
}
|
||||
|
|
|
@ -60,10 +60,11 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
}

if c.cfgMorph.cacheTTL < 0 {
- netmapSource = wrap
+ netmapSource = newRawNetmapStorage(wrap)
} else {
// use RPC node as source of netmap (with caching)
- netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap)
+ netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg,
+ morphconfig.NetmapCandidatesPollInterval(c.appCfg))
}

c.netMapSource = netmapSource
cmd/frostfs-node/netmap_source.go (new file, 55 lines)

@ -0,0 +1,55 @@
package main

import (
"context"

"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

type rawNetmapSource struct {
client *netmapClient.Client
}

func newRawNetmapStorage(client *netmapClient.Client) netmap.Source {
return &rawNetmapSource{
client: client,
}
}

func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
nm, err := s.client.GetNetMap(ctx, diff)
if err != nil {
return nil, err
}
candidates, err := s.client.GetCandidates(ctx)
if err != nil {
return nil, err
}
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
return nm, nil
}

func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
nm, err := s.client.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return nil, err
}
candidates, err := s.client.GetCandidates(ctx)
if err != nil {
return nil, err
}
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
return nm, nil
}

func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) {
return s.client.Epoch(ctx)
}
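Both the raw source above and the cached source earlier in this change overlay the freshest candidate state (status, external addresses, endpoints) onto the epoch netmap before returning it. A stripped-down sketch of that overlay idea on plain structs, not the SDK types used above:

package main

import "fmt"

// node is a simplified stand-in for netmapSDK.NodeInfo.
type node struct {
	key    string
	status string
}

// overlay copies the status of matching candidates onto the netmap nodes.
func overlay(nm []node, candidates []node) {
	for i := range nm {
		for _, c := range candidates {
			if c.key == nm[i].key {
				nm[i].status = c.status
				break
			}
		}
	}
}

func main() {
	nm := []node{{key: "a", status: "maintenance"}}
	overlay(nm, []node{{key: "a", status: "online"}})
	fmt.Println(nm[0].status) // online
}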
@ -16,7 +16,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
|
||||
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
|
||||
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
|
||||
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
|
||||
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
|
||||
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
|
||||
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
|
||||
|
@ -172,12 +171,10 @@ func initObjectService(c *cfg) {
|
|||
|
||||
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
|
||||
|
||||
apeSvc := createAPEService(c, splitSvc)
|
||||
|
||||
aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
|
||||
apeSvc := createAPEService(c, &irFetcher, splitSvc)
|
||||
|
||||
var commonSvc objectService.Common
|
||||
commonSvc.Init(&c.internals, aclSvc)
|
||||
commonSvc.Init(&c.internals, apeSvc)
|
||||
|
||||
respSvc := objectService.NewResponseService(
|
||||
&commonSvc,
|
||||
|
@ -189,9 +186,9 @@ func initObjectService(c *cfg) {
|
|||
respSvc,
|
||||
)
|
||||
|
||||
c.shared.metricsSvc = objectService.NewMetricCollector(
|
||||
c.metricsSvc = objectService.NewMetricCollector(
|
||||
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
|
||||
qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
|
||||
qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
|
||||
auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
|
||||
server := objectTransportGRPC.New(auditSvc)
|
||||
|
||||
|
@ -284,7 +281,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
|
|||
})
|
||||
}
|
||||
|
||||
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
|
||||
func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
|
||||
return &innerRingFetcherWithNotary{
|
||||
sidechain: c.cfgMorph.client,
|
||||
}
|
||||
|
@ -429,28 +426,19 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
|
|||
)
|
||||
}
|
||||
|
||||
func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
|
||||
return v2.New(
|
||||
apeSvc,
|
||||
c.netMapSource,
|
||||
irFetcher,
|
||||
c.cfgObject.cnrSource,
|
||||
v2.WithLogger(c.log),
|
||||
)
|
||||
}
|
||||
|
||||
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
|
||||
func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
|
||||
return objectAPE.NewService(
|
||||
objectAPE.NewChecker(
|
||||
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
|
||||
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
|
||||
objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
|
||||
c.shared.frostfsidClient,
|
||||
c.frostfsidClient,
|
||||
c.netMapSource,
|
||||
c.cfgNetmap.state,
|
||||
c.cfgObject.cnrSource,
|
||||
c.binPublicKey,
|
||||
),
|
||||
objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
|
||||
splitSvc,
|
||||
)
|
||||
}
|
||||
|
|
|
@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) {
|
|||
func pprofComponent(c *cfg) (*httpComponent, bool) {
|
||||
var updated bool
|
||||
// check if it has been inited before
|
||||
if c.dynamicConfiguration.pprof == nil {
|
||||
c.dynamicConfiguration.pprof = new(httpComponent)
|
||||
c.dynamicConfiguration.pprof.cfg = c
|
||||
c.dynamicConfiguration.pprof.name = "pprof"
|
||||
c.dynamicConfiguration.pprof.handler = httputil.Handler()
|
||||
c.dynamicConfiguration.pprof.preReload = tuneProfilers
|
||||
if c.pprof == nil {
|
||||
c.pprof = new(httpComponent)
|
||||
c.pprof.cfg = c
|
||||
c.pprof.name = "pprof"
|
||||
c.pprof.handler = httputil.Handler()
|
||||
c.pprof.preReload = tuneProfilers
|
||||
updated = true
|
||||
}
|
||||
|
||||
// (re)init read configuration
|
||||
enabled := profilerconfig.Enabled(c.appCfg)
|
||||
if enabled != c.dynamicConfiguration.pprof.enabled {
|
||||
c.dynamicConfiguration.pprof.enabled = enabled
|
||||
if enabled != c.pprof.enabled {
|
||||
c.pprof.enabled = enabled
|
||||
updated = true
|
||||
}
|
||||
address := profilerconfig.Address(c.appCfg)
|
||||
if address != c.dynamicConfiguration.pprof.address {
|
||||
c.dynamicConfiguration.pprof.address = address
|
||||
if address != c.pprof.address {
|
||||
c.pprof.address = address
|
||||
updated = true
|
||||
}
|
||||
dur := profilerconfig.ShutdownTimeout(c.appCfg)
|
||||
if dur != c.dynamicConfiguration.pprof.shutdownDur {
|
||||
c.dynamicConfiguration.pprof.shutdownDur = dur
|
||||
if dur != c.pprof.shutdownDur {
|
||||
c.pprof.shutdownDur = dur
|
||||
updated = true
|
||||
}
|
||||
|
||||
return c.dynamicConfiguration.pprof, updated
|
||||
return c.pprof, updated
|
||||
}
|
||||
|
||||
func tuneProfilers(c *cfg) {
|
||||
|
|
|
@ -43,6 +43,9 @@ func initQoSService(c *cfg) {
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
+ if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
+ return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
+ }
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)

@ -73,20 +76,8 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
- for _, pk := range s.allowedInternalPubs {
- if bytes.Equal(pk, requestSignPublicKey) {
- return ctx
- }
- }
- nm, err := s.netmapSource.GetNetMap(ctx, 0)
- if err != nil {
- s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
- return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
- }
- for _, node := range nm.Nodes() {
- if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
- return ctx
- }
+ if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
+ return ctx
+ }
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())

@ -95,3 +86,23 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}

func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, publicKey) {
return true
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return false
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), publicKey) {
return true
}
}

return false
}
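Callers attach an IO tag to the request context through the frostfs-qos tagging helpers, and AdjustIncomingTag downgrades anything it cannot verify against the allowed internal keys or the netmap to the client tag. A minimal sketch of the caller side, using the same helpers as the tests below (the literal "internal" stands in for qos.IOTagInternal.String()):

package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)

func main() {
	// Tag an outgoing request context as "internal"; whether the server keeps
	// that tag depends on the signer check in AdjustIncomingTag.
	ctx := tagging.ContextWithIOTag(context.Background(), "internal")

	tag, ok := tagging.IOTagFromContext(ctx)
	fmt.Println(tag, ok) // internal true
}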
cmd/frostfs-node/qos_test.go (new file, 226 lines)
|
@ -0,0 +1,226 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
|
||||
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQoSService_Client(t *testing.T) {
|
||||
t.Parallel()
|
||||
s, pk := testQoSServicePrepare(t)
|
||||
t.Run("IO tag client defined", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
|
||||
ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
|
||||
ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Request)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestQoSService_Internal(t *testing.T) {
|
||||
t.Parallel()
|
||||
s, pk := testQoSServicePrepare(t)
|
||||
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagInternal.String(), tag)
|
||||
})
|
||||
t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagInternal.String(), tag)
|
||||
})
|
||||
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
|
||||
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagInternal.String(), tag)
|
||||
})
|
||||
t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
|
||||
ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagInternal.String(), tag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestQoSService_Critical(t *testing.T) {
|
||||
t.Parallel()
|
||||
s, pk := testQoSServicePrepare(t)
|
||||
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagCritical.String(), tag)
|
||||
})
|
||||
t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagCritical.String(), tag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestQoSService_NetmapGetError(t *testing.T) {
|
||||
t.Parallel()
|
||||
s, pk := testQoSServicePrepare(t)
|
||||
s.netmapSource = &utilTesting.TestNetmapSource{}
|
||||
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
|
||||
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
|
||||
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
|
||||
tag, ok := tagging.IOTagFromContext(ctx)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, qos.IOTagClient.String(), tag)
|
||||
})
|
||||
}
|
||||
|
||||
func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
|
||||
nmSigner, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
reqSigner, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
allowedCritSigner, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
allowedIntSigner, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
var node netmap.NodeInfo
|
||||
node.SetPublicKey(nmSigner.PublicKey().Bytes())
|
||||
nm := &netmap.NetMap{}
|
||||
nm.SetEpoch(100)
|
||||
nm.SetNodes([]netmap.NodeInfo{node})
|
||||
|
||||
return &cfgQoSService{
|
||||
logger: test.NewLogger(t),
|
||||
netmapSource: &utilTesting.TestNetmapSource{
|
||||
Netmaps: map[uint64]*netmap.NetMap{
|
||||
100: nm,
|
||||
},
|
||||
CurrentEpoch: 100,
|
||||
},
|
||||
allowedCriticalPubs: [][]byte{
|
||||
allowedCritSigner.PublicKey().Bytes(),
|
||||
},
|
||||
allowedInternalPubs: [][]byte{
|
||||
allowedIntSigner.PublicKey().Bytes(),
|
||||
},
|
||||
},
|
||||
&testQoSServicePublicKeys{
|
||||
NetmapNode: nmSigner.PublicKey().Bytes(),
|
||||
Request: reqSigner.PublicKey().Bytes(),
|
||||
Internal: allowedIntSigner.PublicKey().Bytes(),
|
||||
Critical: allowedCritSigner.PublicKey().Bytes(),
|
||||
}
|
||||
}
|
||||
|
||||
type testQoSServicePublicKeys struct {
|
||||
NetmapNode []byte
|
||||
Request []byte
|
||||
Internal []byte
|
||||
Critical []byte
|
||||
}
|
|
@ -51,9 +51,9 @@ func initTreeService(c *cfg) {
|
|||
c.treeService = tree.New(
|
||||
tree.WithContainerSource(cnrSource{
|
||||
src: c.cfgObject.cnrSource,
|
||||
cli: c.shared.cnrClient,
|
||||
cli: c.cnrClient,
|
||||
}),
|
||||
tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
|
||||
tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
|
||||
tree.WithNetmapSource(c.netMapSource),
|
||||
tree.WithPrivateKey(&c.key.PrivateKey),
|
||||
tree.WithLogger(c.log),
|
||||
|
|
|
@ -30,6 +30,11 @@ func validateConfig(c *config.Config) error {
|
|||
return fmt.Errorf("invalid logger destination: %w", err)
|
||||
}
|
||||
|
||||
err = loggerPrm.SetTags(loggerconfig.Tags(c))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid list of allowed tags: %w", err)
|
||||
}
|
||||
|
||||
// shard configuration validation
|
||||
|
||||
shardNum := 0
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
|
@ -22,17 +21,4 @@ func TestValidate(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mainnet", func(t *testing.T) {
|
||||
os.Clearenv() // ENVs have priority over config files, so we do this in tests
|
||||
p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
|
||||
c := config.New(p, "", config.EnvPrefix)
|
||||
require.NoError(t, validateConfig(c))
|
||||
})
|
||||
t.Run("testnet", func(t *testing.T) {
|
||||
os.Clearenv() // ENVs have priority over config files, so we do this in tests
|
||||
p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
|
||||
c := config.New(p, "", config.EnvPrefix)
|
||||
require.NoError(t, validateConfig(c))
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
FROSTFS_IR_LOGGER_LEVEL=info
|
||||
FROSTFS_IR_LOGGER_TIMESTAMP=true
|
||||
FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph"
|
||||
FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug"
|
||||
|
||||
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
|
||||
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
|
||||
|
|
|
@ -3,6 +3,9 @@
|
|||
logger:
|
||||
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
|
||||
timestamp: true
|
||||
tags:
|
||||
- names: "main, morph" # Possible values: `main`, `morph`, `grpc_svc`, `ir`, `processor`.
|
||||
level: debug
|
||||
|
||||
wallet:
|
||||
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file
|
||||
|
|
|
@ -180,6 +180,10 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
|
|||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
|
||||
|
@ -197,6 +201,9 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
|
|||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50
|
||||
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100
|
||||
|
||||
## 1 shard
|
||||
### Flag to refill Metabase from BlobStor
|
||||
|
|
|
@ -252,7 +252,13 @@
|
|||
{
|
||||
"tag": "policer",
|
||||
"weight": 5,
|
||||
"limit_ops": 25000
|
||||
"limit_ops": 25000,
|
||||
"prohibited": true
|
||||
},
|
||||
{
|
||||
"tag": "treesync",
|
||||
"weight": 5,
|
||||
"limit_ops": 25
|
||||
}
|
||||
]
|
||||
},
|
||||
|
@ -287,6 +293,11 @@
|
|||
"tag": "policer",
|
||||
"weight": 50,
|
||||
"limit_ops": 2500
|
||||
},
|
||||
{
|
||||
"tag": "treesync",
|
||||
"weight": 50,
|
||||
"limit_ops": 100
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -95,6 +95,9 @@ morph:
|
|||
- address: wss://rpc2.morph.frostfs.info:40341/ws
|
||||
priority: 2
|
||||
ape_chain_cache_size: 100000
|
||||
netmap:
|
||||
candidates:
|
||||
poll_interval: 20s
|
||||
|
||||
apiclient:
|
||||
dial_timeout: 15s # timeout for FrostFS API client connection
|
||||
|
@ -148,7 +151,7 @@ storage:
|
|||
flush_worker_count: 30 # number of write-cache flusher threads
|
||||
|
||||
metabase:
|
||||
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
|
||||
perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
|
||||
max_batch_size: 200
|
||||
max_batch_delay: 20ms
|
||||
|
||||
|
@ -161,13 +164,13 @@ storage:
|
|||
|
||||
blobstor:
|
||||
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
|
||||
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
|
||||
perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
|
||||
depth: 1 # max depth of object tree storage in key-value DB
|
||||
width: 4 # max width of object tree storage in key-value DB
|
||||
opened_cache_capacity: 50 # maximum number of opened database files
|
||||
opened_cache_ttl: 5m # ttl for opened database file
|
||||
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
|
||||
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
|
||||
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
|
||||
depth: 5 # max depth of object tree storage in FS
|
||||
|
||||
gc:
|
||||
|
@ -249,6 +252,10 @@ storage:
|
|||
- tag: policer
|
||||
weight: 5
|
||||
limit_ops: 25000
|
||||
prohibited: true
|
||||
- tag: treesync
|
||||
weight: 5
|
||||
limit_ops: 25
|
||||
write:
|
||||
max_running_ops: 1000
|
||||
max_waiting_ops: 100
|
||||
|
@ -271,6 +278,9 @@ storage:
|
|||
- tag: policer
|
||||
weight: 50
|
||||
limit_ops: 2500
|
||||
- tag: treesync
|
||||
weight: 50
|
||||
limit_ops: 100
|
||||
|
||||
1:
|
||||
writecache:
|
||||
|
@ -290,7 +300,7 @@ storage:
|
|||
pilorama:
|
||||
path: tmp/1/blob/pilorama.db
|
||||
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
|
||||
perm: 0644 # permission to use for the database file and intermediate directories
|
||||
perm: 0o644 # permission to use for the database file and intermediate directories
|
||||
|
||||
tracing:
|
||||
enabled: true
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
# N3 Mainnet Storage node configuration
|
||||
|
||||
Here is a template for a simple storage node configuration in N3 Mainnet.
|
||||
Make sure to specify correct values instead of `<...>` placeholders.
|
||||
Do not change the `contracts` section. Run the latest frostfs-node release with
|
||||
the fixed config: `frostfs-node -c config.yml`.
|
||||
|
||||
To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
|
||||
The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
|
||||
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`)
|
||||
|
||||
## Tips
|
||||
|
||||
Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server.
|
||||
```yaml
|
||||
node:
|
||||
addresses:
|
||||
- grpcs://frostfs.my.org:8080
|
||||
|
||||
grpc:
|
||||
num: 1
|
||||
0:
|
||||
endpoint: frostfs.my.org:8080
|
||||
tls:
|
||||
enabled: true
|
||||
certificate: /path/to/cert
|
||||
key: /path/to/key
|
||||
```
|
|
@ -1,70 +0,0 @@
|
|||
node:
|
||||
wallet:
|
||||
path: <path/to/wallet>
|
||||
address: <address-in-wallet>
|
||||
password: <password>
|
||||
addresses:
|
||||
- <announced.address:port>
|
||||
attribute_0: UN-LOCODE:<XX YYY>
|
||||
attribute_1: Price:100000
|
||||
attribute_2: User-Agent:FrostFS\/0.9999
|
||||
|
||||
grpc:
|
||||
num: 1
|
||||
0:
|
||||
endpoint: <listen.local.address:port>
|
||||
tls:
|
||||
enabled: false
|
||||
|
||||
storage:
|
||||
shard_num: 1
|
||||
shard:
|
||||
0:
|
||||
metabase:
|
||||
path: /storage/path/metabase
|
||||
perm: 0600
|
||||
blobstor:
|
||||
- path: /storage/path/blobovnicza
|
||||
type: blobovnicza
|
||||
perm: 0600
|
||||
opened_cache_capacity: 32
|
||||
depth: 1
|
||||
width: 1
|
||||
- path: /storage/path/fstree
|
||||
type: fstree
|
||||
perm: 0600
|
||||
depth: 4
|
||||
writecache:
|
||||
enabled: false
|
||||
gc:
|
||||
remover_batch_size: 100
|
||||
remover_sleep_interval: 1m
|
||||
|
||||
logger:
|
||||
level: info
|
||||
|
||||
prometheus:
|
||||
enabled: true
|
||||
address: localhost:9090
|
||||
shutdown_timeout: 15s
|
||||
|
||||
object:
|
||||
put:
|
||||
remote_pool_size: 100
|
||||
local_pool_size: 100
|
||||
|
||||
morph:
|
||||
rpc_endpoint:
|
||||
- wss://rpc1.morph.frostfs.info:40341/ws
|
||||
- wss://rpc2.morph.frostfs.info:40341/ws
|
||||
- wss://rpc3.morph.frostfs.info:40341/ws
|
||||
- wss://rpc4.morph.frostfs.info:40341/ws
|
||||
- wss://rpc5.morph.frostfs.info:40341/ws
|
||||
- wss://rpc6.morph.frostfs.info:40341/ws
|
||||
- wss://rpc7.morph.frostfs.info:40341/ws
|
||||
dial_timeout: 20s
|
||||
|
||||
contracts:
|
||||
balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
|
||||
container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
|
||||
netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
|
|
@ -1,129 +0,0 @@
|
|||
# N3 Testnet Storage node configuration
|
||||
|
||||
There is a prepared configuration for NeoFS Storage Node deployment in
|
||||
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
|
||||
docker image and run it with docker-compose.
|
||||
|
||||
## Build image
|
||||
|
||||
Prepared **frostfs-storage-testnet** image is available at Docker Hub.
|
||||
However, if you need to rebuild it for some reason, run the
|
||||
`make image-storage-testnet` command.
|
||||
|
||||
```
|
||||
$ make image-storage-testnet
|
||||
...
|
||||
Successfully built ab0557117b02
|
||||
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
|
||||
```
|
||||
|
||||
## Deploy node
|
||||
|
||||
To run a storage node in the N3 Testnet environment, you should deposit GAS assets,
|
||||
update the docker-compose file, and start the node.
|
||||
|
||||
### Deposit
|
||||
|
||||
The Storage Node owner should deposit GAS to the NeoFS smart contract. It generates a
|
||||
bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send the bootstrap tx.
|
||||
|
||||
First, obtain GAS in the N3 Testnet chain. You can do that with the
|
||||
[faucet](https://neowish.ngd.network) service.
|
||||
|
||||
Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
|
||||
You can provide a scripthash in the `data` argument of the transfer tx to make a
|
||||
deposit to a specified account. Otherwise, the deposit is made to the tx sender.
|
||||
|
||||
NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
|
||||
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
|
||||
|
||||
See a deposit example with `neo-go`.
|
||||
|
||||
```
|
||||
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
|
||||
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
|
||||
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
|
||||
--token GAS \
|
||||
--amount 1
|
||||
```
|
||||
|
||||
### Configure
|
||||
|
||||
Next, configure the `node_config.env` file. Change the endpoint values. Both
|
||||
should contain your **public** IP.
|
||||
|
||||
```
|
||||
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
|
||||
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
|
||||
```
|
||||
|
||||
Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
|
||||
attribute.
|
||||
|
||||
```
|
||||
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
|
||||
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
|
||||
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
|
||||
```
|
||||
|
||||
You can validate UN/LOCODE attribute in
|
||||
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
|
||||
with frostfs-cli.
|
||||
|
||||
```
|
||||
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
|
||||
Country: Russia
|
||||
Location: Saint Petersburg (ex Leningrad)
|
||||
Continent: Europe
|
||||
Subdivision: [SPE] Sankt-Peterburg
|
||||
Coordinates: 59.53, 30.15
|
||||
```
|
||||
|
||||
It is recommended to pass the node's key as a file. To do so, convert your wallet
|
||||
WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.
|
||||
|
||||
```
|
||||
// Print WIF in a 32-byte hex format
|
||||
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
|
||||
PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
|
||||
PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
|
||||
WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
|
||||
Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
|
||||
ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
|
||||
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
|
||||
|
||||
// Save 32-byte hex into a file
|
||||
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
|
||||
```
|
||||
|
||||
Then, specify the path to this file in `docker-compose.yml`
|
||||
```yaml
|
||||
volumes:
|
||||
- frostfs_storage:/storage
|
||||
- ./my_wallet.key:/node.key
|
||||
```
|
||||
|
||||
|
||||
NeoFS objects will be stored on your machine. By default, docker-compose
|
||||
is configured to store objects in the named docker volume `frostfs_storage`. You can
|
||||
specify a directory on the filesystem to store objects there.
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
- /home/username/frostfs/rc3/storage:/storage
|
||||
- ./my_wallet.key:/node.key
|
||||
```
|
||||
|
||||
### Start
|
||||
|
||||
Run the node with `docker-compose up` command and stop it with `docker-compose down`.
|
||||
|
||||
### Debug
|
||||
|
||||
To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
|
||||
the log, set the log level to debug with this env:
|
||||
|
||||
```yaml
|
||||
environment:
|
||||
- NEOFS_LOGGER_LEVEL=debug
|
||||
```
|
|
@ -1,52 +0,0 @@
|
|||
logger:
|
||||
level: info
|
||||
|
||||
morph:
|
||||
rpc_endpoint:
|
||||
- wss://rpc01.morph.testnet.frostfs.info:51331/ws
|
||||
- wss://rpc02.morph.testnet.frostfs.info:51331/ws
|
||||
- wss://rpc03.morph.testnet.frostfs.info:51331/ws
|
||||
- wss://rpc04.morph.testnet.frostfs.info:51331/ws
|
||||
- wss://rpc05.morph.testnet.frostfs.info:51331/ws
|
||||
- wss://rpc06.morph.testnet.frostfs.info:51331/ws
|
||||
- wss://rpc07.morph.testnet.frostfs.info:51331/ws
|
||||
dial_timeout: 20s
|
||||
|
||||
contracts:
|
||||
balance: e0420c216003747626670d1424569c17c79015bf
|
||||
container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
|
||||
netmap: d4b331639799e2958d4bc5b711b469d79de94e01
|
||||
|
||||
node:
|
||||
key: /node.key
|
||||
attribute_0: Deployed:SelfHosted
|
||||
attribute_1: User-Agent:FrostFS\/0.9999
|
||||
|
||||
prometheus:
|
||||
enabled: true
|
||||
address: localhost:9090
|
||||
shutdown_timeout: 15s
|
||||
|
||||
storage:
|
||||
shard_num: 1
|
||||
shard:
|
||||
0:
|
||||
metabase:
|
||||
path: /storage/metabase
|
||||
perm: 0777
|
||||
blobstor:
|
||||
- path: /storage/path/blobovnicza
|
||||
type: blobovnicza
|
||||
perm: 0600
|
||||
opened_cache_capacity: 32
|
||||
depth: 1
|
||||
width: 1
|
||||
- path: /storage/path/fstree
|
||||
type: fstree
|
||||
perm: 0600
|
||||
depth: 4
|
||||
writecache:
|
||||
enabled: false
|
||||
gc:
|
||||
remover_batch_size: 100
|
||||
remover_sleep_interval: 1m
|
|
@ -51,10 +51,7 @@ However, all mode changing operations are idempotent.
|
|||
|
||||
## Automatic mode changes
|
||||
|
||||
Shard can automatically switch to a `degraded-read-only` mode in 3 cases:
|
||||
1. If the metabase was not available or couldn't be opened/initialized during shard startup.
|
||||
2. If shard error counter exceeds threshold.
|
||||
3. If the metabase couldn't be reopened during SIGHUP handling.
|
||||
A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
|
||||
|
||||
# Detach shard
|
||||
|
||||
|
|
|
@ -148,15 +148,19 @@ morph:
|
|||
- address: wss://rpc2.morph.frostfs.info:40341/ws
|
||||
priority: 2
|
||||
switch_interval: 2m
|
||||
netmap:
|
||||
candidates:
|
||||
poll_interval: 20s
|
||||
```
|
||||
|
||||
| Parameter | Type | Default value | Description |
|
||||
| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
|
||||
| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
|
||||
| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
|
||||
| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
|
||||
| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
|
||||
| Parameter | Type | Default value | Description |
|
||||
|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
|
||||
| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
|
||||
| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
|
||||
| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
|
||||
| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
|
||||
| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval between polls of netmap candidates, which are merged into the locally cached netmap. |
|
||||
|
||||
## `rpc_endpoint` subsection
|
||||
| Parameter | Type | Default value | Description |
|
||||
|
@ -209,7 +213,7 @@ blobstor:
|
|||
width: 4
|
||||
- type: fstree
|
||||
path: /path/to/blobstor/blobovnicza
|
||||
perm: 0644
|
||||
perm: 0o644
|
||||
size: 4194304
|
||||
depth: 1
|
||||
width: 4
|
||||
|
@ -269,7 +273,7 @@ gc:
|
|||
```yaml
|
||||
metabase:
|
||||
path: /path/to/meta.db
|
||||
perm: 0644
|
||||
perm: 0o644
|
||||
max_batch_size: 200
|
||||
max_batch_delay: 20ms
|
||||
```
|
||||
|
@ -359,6 +363,7 @@ limits:
|
|||
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
|
||||
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
|
||||
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
|
||||
| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
|
||||
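To make the new `prohibited` flag concrete, here is a minimal sketch of a shard `limits` section. The values are illustrative and mirror the example config shown earlier in this diff; this is not a recommended production setting.

```yaml
limits:
  read:
    max_running_ops: 1000
    max_waiting_ops: 100
    tags:
      - tag: policer
        weight: 5
        limit_ops: 25000
        prohibited: true  # policer reads on this shard are rejected outright
      - tag: treesync
        weight: 5
        limit_ops: 25
```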
|
||||
# `node` section
|
||||
|
||||
|
|
10
go.mod
10
go.mod
|
@ -6,13 +6,13 @@ require (
|
|||
code.gitea.io/sdk/gitea v0.17.1
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
|
||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
|
||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1
|
||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4
|
||||
|
|
20
go.sum
20
go.sum
|
@ -4,22 +4,22 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9
|
|||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d h1:ZLKDupw362Ciing7kdIZhDYGMyo2QZyJ6sS/8X9QWJ0=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d/go.mod h1:2PWt5GwJTnhjHp+mankcfCeAJBMn7puxPm+RS+lliVk=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
|
||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
|
||||
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
|
||||
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
|
||||
|
|
|
@ -1,9 +1,25 @@
|
|||
package assert
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func True(cond bool, details ...string) {
|
||||
if !cond {
|
||||
panic(strings.Join(details, " "))
|
||||
}
|
||||
}
|
||||
|
||||
func False(cond bool, details ...string) {
|
||||
if cond {
|
||||
panic(strings.Join(details, " "))
|
||||
}
|
||||
}
|
||||
|
||||
func NoError(err error, details ...string) {
|
||||
if err != nil {
|
||||
content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
|
||||
panic(content)
|
||||
}
|
||||
}
|
||||
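A minimal usage sketch for the new `NoError` helper. Only the assert API itself comes from the diff; the import path and the call site are assumptions.

```go
package main

import (
	"os"

	// Import path assumed from the repository layout; adjust as needed.
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)

func main() {
	f, err := os.CreateTemp("", "demo")
	assert.NoError(err, "creating temp file")
	// Panics with a "BUG: <err>: closing temp file" message if Close fails.
	assert.NoError(f.Close(), "closing temp file")
}
```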
|
|
|
@ -512,7 +512,8 @@ const (
|
|||
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
|
||||
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
|
||||
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
|
||||
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
|
||||
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
|
||||
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
|
||||
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
|
||||
FailedToUpdateNetmapCandidates = "update netmap candidates failed"
|
||||
)
|
||||
|
|
|
@ -15,7 +15,7 @@ func newQoSMetrics() *QoSMetrics {
|
|||
Namespace: namespace,
|
||||
Subsystem: qosSubsystem,
|
||||
Name: "operations_total",
|
||||
Help: "Count of pending, in progree, completed and failed due of resource exhausted error operations for each shard",
|
||||
Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
|
||||
}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
|
||||
}
|
||||
}
|
||||
|
|
31
internal/qos/config.go
Normal file
31
internal/qos/config.go
Normal file
|
@ -0,0 +1,31 @@
|
|||
package qos
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
NoLimit int64 = math.MaxInt64
|
||||
DefaultIdleTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
type LimiterConfig struct {
|
||||
Read OpConfig
|
||||
Write OpConfig
|
||||
}
|
||||
|
||||
type OpConfig struct {
|
||||
MaxWaitingOps int64
|
||||
MaxRunningOps int64
|
||||
IdleTimeout time.Duration
|
||||
Tags []IOTagConfig
|
||||
}
|
||||
|
||||
type IOTagConfig struct {
|
||||
Tag string
|
||||
Weight *float64
|
||||
LimitOps *float64
|
||||
ReservedOps *float64
|
||||
Prohibited bool
|
||||
}
|
|
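A hedged sketch of how the new `LimiterConfig` types plug into `qos.NewLimiter` (its updated signature appears further below in this diff). The caller and the concrete values are illustrative; the types, constants, and functions are the ones introduced above.

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

func main() {
	limit := 25000.0 // optional per-tag ops/s limit

	cfg := qos.LimiterConfig{
		Read: qos.OpConfig{
			MaxRunningOps: 1000,
			MaxWaitingOps: qos.NoLimit,
			IdleTimeout:   qos.DefaultIdleTimeout,
			Tags: []qos.IOTagConfig{
				// Weights are left unset: they must be given for all tags or for none.
				{Tag: "policer", LimitOps: &limit, Prohibited: true},
			},
		},
		Write: qos.OpConfig{
			MaxRunningOps: 1000,
			MaxWaitingOps: qos.NoLimit,
			IdleTimeout:   qos.DefaultIdleTimeout,
		},
	}

	// NewLimiter validates internally; calling Validate first just surfaces the error earlier.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid limits:", err)
		return
	}
	limiter, err := qos.NewLimiter(cfg)
	fmt.Println(limiter != nil, err)
}
```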
@ -26,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor
|
|||
if err != nil {
|
||||
tag = IOTagClient
|
||||
}
|
||||
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
|
||||
if tag.IsLocal() {
|
||||
tag = IOTagInternal
|
||||
}
|
||||
ctx = tagging.ContextWithIOTag(ctx, tag.String())
|
||||
|
@ -44,7 +44,7 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto
|
|||
if err != nil {
|
||||
tag = IOTagClient
|
||||
}
|
||||
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
|
||||
if tag.IsLocal() {
|
||||
tag = IOTagInternal
|
||||
}
|
||||
ctx = tagging.ContextWithIOTag(ctx, tag.String())
|
||||
|
|
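The `IsLocal` helper used in the two interceptors above is defined later in this diff. As a quick sketch of the remapping they perform, any node-local background tag (background, policer, writecache, treesync) leaves the node as `internal`; the imports match the test file below.

```go
package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)

func main() {
	// An outgoing call made by the policer carries its local tag in the context.
	ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagPolicer.String())

	tag, err := qos.FromRawString(qos.IOTagFromContext(ctx))
	if err != nil {
		tag = qos.IOTagClient // unknown or missing tags degrade to "client"
	}
	if tag.IsLocal() {
		tag = qos.IOTagInternal
	}
	fmt.Println(tag.String()) // prints "internal"
}
```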
219
internal/qos/grpc_test.go
Normal file
219
internal/qos/grpc_test.go
Normal file
|
@ -0,0 +1,219 @@
|
|||
package qos_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const (
|
||||
okKey = "ok"
|
||||
)
|
||||
|
||||
var (
|
||||
errTest = errors.New("mock")
|
||||
errWrongTag = errors.New("wrong tag")
|
||||
errNoTag = errors.New("failed to get tag from context")
|
||||
errResExhausted *apistatus.ResourceExhausted
|
||||
tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
|
||||
)
|
||||
|
||||
type mockGRPCServerStream struct {
|
||||
grpc.ServerStream
|
||||
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (m *mockGRPCServerStream) Context() context.Context {
|
||||
return m.ctx
|
||||
}
|
||||
|
||||
type limiter struct {
|
||||
acquired bool
|
||||
released bool
|
||||
}
|
||||
|
||||
func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
|
||||
l.acquired = true
|
||||
if key != okKey {
|
||||
return nil, false
|
||||
}
|
||||
return func() { l.released = true }, true
|
||||
}
|
||||
|
||||
func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
|
||||
interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
|
||||
handler := func(ctx context.Context, req any) (any, error) {
|
||||
return nil, errTest
|
||||
}
|
||||
_, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
|
||||
return err
|
||||
}
|
||||
|
||||
func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
|
||||
interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
|
||||
handler := func(srv any, stream grpc.ServerStream) error {
|
||||
return errTest
|
||||
}
|
||||
err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
|
||||
FullMethod: methodName,
|
||||
}, handler)
|
||||
return err
|
||||
}
|
||||
|
||||
func Test_MaxActiveRPCLimiter(t *testing.T) {
|
||||
// UnaryServerInterceptor
|
||||
t.Run("unary fail", func(t *testing.T) {
|
||||
var lim limiter
|
||||
|
||||
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
|
||||
require.ErrorAs(t, err, &errResExhausted)
|
||||
require.True(t, lim.acquired)
|
||||
require.False(t, lim.released)
|
||||
})
|
||||
t.Run("unary pass critical", func(t *testing.T) {
|
||||
var lim limiter
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
|
||||
err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
|
||||
require.ErrorIs(t, err, errTest)
|
||||
require.False(t, lim.acquired)
|
||||
require.False(t, lim.released)
|
||||
})
|
||||
t.Run("unary pass", func(t *testing.T) {
|
||||
var lim limiter
|
||||
|
||||
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
|
||||
require.ErrorIs(t, err, errTest)
|
||||
require.True(t, lim.acquired)
|
||||
require.True(t, lim.released)
|
||||
})
|
||||
// StreamServerInterceptor
|
||||
t.Run("stream fail", func(t *testing.T) {
|
||||
var lim limiter
|
||||
|
||||
err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
|
||||
require.ErrorAs(t, err, &errResExhausted)
|
||||
require.True(t, lim.acquired)
|
||||
require.False(t, lim.released)
|
||||
})
|
||||
t.Run("stream pass critical", func(t *testing.T) {
|
||||
var lim limiter
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
|
||||
|
||||
err := streamMaxActiveRPCLimiter(ctx, &lim, "")
|
||||
require.ErrorIs(t, err, errTest)
|
||||
require.False(t, lim.acquired)
|
||||
require.False(t, lim.released)
|
||||
})
|
||||
t.Run("stream pass", func(t *testing.T) {
|
||||
var lim limiter
|
||||
|
||||
err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
|
||||
require.ErrorIs(t, err, errTest)
|
||||
require.True(t, lim.acquired)
|
||||
require.True(t, lim.released)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
|
||||
interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
|
||||
called := false
|
||||
handler := func(ctx context.Context, req any) (any, error) {
|
||||
called = true
|
||||
if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, errWrongTag
|
||||
}
|
||||
_, err := interceptor(context.Background(), nil, nil, handler)
|
||||
require.NoError(t, err)
|
||||
require.True(t, called)
|
||||
}
|
||||
|
||||
func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
|
||||
interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
|
||||
|
||||
// check context with no value
|
||||
called := false
|
||||
invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
|
||||
called = true
|
||||
if _, ok := tagging.IOTagFromContext(ctx); ok {
|
||||
return fmt.Errorf("%v: expected no IO tags", errWrongTag)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
|
||||
require.True(t, called)
|
||||
|
||||
// check context for internal tag
|
||||
targetTag := qos.IOTagInternal.String()
|
||||
invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
|
||||
raw, ok := tagging.IOTagFromContext(ctx)
|
||||
if !ok {
|
||||
return errNoTag
|
||||
}
|
||||
if raw != targetTag {
|
||||
return errWrongTag
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, tag := range tags {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
|
||||
require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
|
||||
}
|
||||
|
||||
// check context for client tag
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "")
|
||||
targetTag = qos.IOTagClient.String()
|
||||
require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
|
||||
}
|
||||
|
||||
func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
|
||||
interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
|
||||
|
||||
// check context with no value
|
||||
called := false
|
||||
streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
called = true
|
||||
if _, ok := tagging.IOTagFromContext(ctx); ok {
|
||||
return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
_, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
|
||||
require.True(t, called)
|
||||
require.NoError(t, err)
|
||||
|
||||
// check context for internal tag
|
||||
targetTag := qos.IOTagInternal.String()
|
||||
streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
|
||||
raw, ok := tagging.IOTagFromContext(ctx)
|
||||
if !ok {
|
||||
return nil, errNoTag
|
||||
}
|
||||
if raw != targetTag {
|
||||
return nil, errWrongTag
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
for _, tag := range tags {
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
|
||||
_, err := interceptor(ctx, nil, nil, "", streamer, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// check context for client tag
|
||||
ctx := tagging.ContextWithIOTag(context.Background(), "")
|
||||
targetTag = qos.IOTagClient.String()
|
||||
_, err = interceptor(ctx, nil, nil, "", streamer, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
|
@ -8,7 +8,6 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
|
@ -37,15 +36,15 @@ type scheduler interface {
|
|||
Close()
|
||||
}
|
||||
|
||||
func NewLimiter(c *limits.Config) (Limiter, error) {
|
||||
if err := validateConfig(c); err != nil {
|
||||
func NewLimiter(c LimiterConfig) (Limiter, error) {
|
||||
if err := c.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readScheduler, err := createScheduler(c.Read())
|
||||
readScheduler, err := createScheduler(c.Read)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create read scheduler: %w", err)
|
||||
}
|
||||
writeScheduler, err := createScheduler(c.Write())
|
||||
writeScheduler, err := createScheduler(c.Write)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create write scheduler: %w", err)
|
||||
}
|
||||
|
@ -63,8 +62,8 @@ func NewLimiter(c *limits.Config) (Limiter, error) {
|
|||
return l, nil
|
||||
}
|
||||
|
||||
func createScheduler(config limits.OpConfig) (scheduler, error) {
|
||||
if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit {
|
||||
func createScheduler(config OpConfig) (scheduler, error) {
|
||||
if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit {
|
||||
return newSemaphoreScheduler(config.MaxRunningOps), nil
|
||||
}
|
||||
return scheduling.NewMClock(
|
||||
|
@ -72,9 +71,9 @@ func createScheduler(config limits.OpConfig) (scheduler, error) {
|
|||
converToSchedulingTags(config.Tags), config.IdleTimeout)
|
||||
}
|
||||
|
||||
func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
|
||||
func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
|
||||
result := make(map[string]scheduling.TagInfo)
|
||||
for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} {
|
||||
for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
|
||||
result[tag.String()] = scheduling.TagInfo{
|
||||
Share: defaultShare,
|
||||
}
|
||||
|
@ -90,6 +89,7 @@ func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.T
|
|||
if l.ReservedOps != nil && *l.ReservedOps != 0 {
|
||||
v.ReservedIOPS = l.ReservedOps
|
||||
}
|
||||
v.Prohibited = l.Prohibited
|
||||
result[l.Tag] = v
|
||||
}
|
||||
return result
|
||||
|
@ -164,8 +164,7 @@ func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (R
|
|||
rel, err := s.RequestArrival(ctx, tag)
|
||||
stat.inProgress.Add(1)
|
||||
if err != nil {
|
||||
if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
|
||||
errors.Is(err, errSemaphoreLimitExceeded) {
|
||||
if isResourceExhaustedErr(err) {
|
||||
stat.resourceExhausted.Add(1)
|
||||
return nil, &apistatus.ResourceExhausted{}
|
||||
}
|
||||
|
@ -211,13 +210,32 @@ func (n *mClockLimiter) startMetricsCollect() {
|
|||
continue
|
||||
}
|
||||
metrics := n.metrics.Load().metrics
|
||||
for tag, s := range n.readStats {
|
||||
metrics.SetOperationTagCounters(shardID, "read", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load())
|
||||
}
|
||||
for tag, s := range n.writeStats {
|
||||
metrics.SetOperationTagCounters(shardID, "write", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load())
|
||||
}
|
||||
exportMetrics(metrics, n.readStats, shardID, "read")
|
||||
exportMetrics(metrics, n.writeStats, shardID, "write")
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) {
|
||||
var pending uint64
|
||||
var inProgress uint64
|
||||
var completed uint64
|
||||
var resExh uint64
|
||||
for tag, s := range stats {
|
||||
pending = s.pending.Load()
|
||||
inProgress = s.inProgress.Load()
|
||||
completed = s.completed.Load()
|
||||
resExh = s.resourceExhausted.Load()
|
||||
if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 {
|
||||
continue
|
||||
}
|
||||
metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
|
||||
}
|
||||
}
|
||||
|
||||
func isResourceExhaustedErr(err error) bool {
|
||||
return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
|
||||
errors.Is(err, errSemaphoreLimitExceeded) ||
|
||||
errors.Is(err, scheduling.ErrTagRequestsProhibited)
|
||||
}
|
||||
|
|
|
@ -3,12 +3,13 @@ package qos
|
|||
const unknownStatsTag = "unknown"
|
||||
|
||||
var statTags = map[string]struct{}{
|
||||
IOTagClient.String(): {},
|
||||
IOTagBackground.String(): {},
|
||||
IOTagClient.String(): {},
|
||||
IOTagCritical.String(): {},
|
||||
IOTagInternal.String(): {},
|
||||
IOTagPolicer.String(): {},
|
||||
IOTagTreeSync.String(): {},
|
||||
IOTagWritecache.String(): {},
|
||||
IOTagCritical.String(): {},
|
||||
unknownStatsTag: {},
|
||||
}
|
||||
|
||||
|
|
|
@ -10,30 +10,33 @@ import (
|
|||
type IOTag string
|
||||
|
||||
const (
|
||||
IOTagClient IOTag = "client"
|
||||
IOTagInternal IOTag = "internal"
|
||||
IOTagBackground IOTag = "background"
|
||||
IOTagWritecache IOTag = "writecache"
|
||||
IOTagPolicer IOTag = "policer"
|
||||
IOTagClient IOTag = "client"
|
||||
IOTagCritical IOTag = "critical"
|
||||
IOTagInternal IOTag = "internal"
|
||||
IOTagPolicer IOTag = "policer"
|
||||
IOTagTreeSync IOTag = "treesync"
|
||||
IOTagWritecache IOTag = "writecache"
|
||||
|
||||
ioTagUnknown IOTag = ""
|
||||
)
|
||||
|
||||
func FromRawString(s string) (IOTag, error) {
|
||||
switch s {
|
||||
case string(IOTagCritical):
|
||||
return IOTagCritical, nil
|
||||
case string(IOTagClient):
|
||||
return IOTagClient, nil
|
||||
case string(IOTagInternal):
|
||||
return IOTagInternal, nil
|
||||
case string(IOTagBackground):
|
||||
return IOTagBackground, nil
|
||||
case string(IOTagWritecache):
|
||||
return IOTagWritecache, nil
|
||||
case string(IOTagClient):
|
||||
return IOTagClient, nil
|
||||
case string(IOTagCritical):
|
||||
return IOTagCritical, nil
|
||||
case string(IOTagInternal):
|
||||
return IOTagInternal, nil
|
||||
case string(IOTagPolicer):
|
||||
return IOTagPolicer, nil
|
||||
case string(IOTagTreeSync):
|
||||
return IOTagTreeSync, nil
|
||||
case string(IOTagWritecache):
|
||||
return IOTagWritecache, nil
|
||||
default:
|
||||
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
|
||||
}
|
||||
|
@ -50,3 +53,7 @@ func IOTagFromContext(ctx context.Context) string {
|
|||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func (t IOTag) IsLocal() bool {
|
||||
return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
|
||||
}
|
||||
|
|
|
@ -4,8 +4,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
|
||||
)
|
||||
|
||||
var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
|
||||
|
@ -14,17 +12,17 @@ type tagConfig struct {
|
|||
Shares, Limit, Reserved *float64
|
||||
}
|
||||
|
||||
func validateConfig(c *limits.Config) error {
|
||||
if err := validateOpConfig(c.Read()); err != nil {
|
||||
func (c *LimiterConfig) Validate() error {
|
||||
if err := validateOpConfig(c.Read); err != nil {
|
||||
return fmt.Errorf("limits 'read' section validation error: %w", err)
|
||||
}
|
||||
if err := validateOpConfig(c.Write()); err != nil {
|
||||
if err := validateOpConfig(c.Write); err != nil {
|
||||
return fmt.Errorf("limits 'write' section validation error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateOpConfig(c limits.OpConfig) error {
|
||||
func validateOpConfig(c OpConfig) error {
|
||||
if c.MaxRunningOps <= 0 {
|
||||
return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
|
||||
}
|
||||
|
@ -40,13 +38,14 @@ func validateOpConfig(c limits.OpConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func validateTags(configTags []limits.IOTagConfig) error {
|
||||
func validateTags(configTags []IOTagConfig) error {
|
||||
tags := map[IOTag]tagConfig{
|
||||
IOTagBackground: {},
|
||||
IOTagClient: {},
|
||||
IOTagInternal: {},
|
||||
IOTagBackground: {},
|
||||
IOTagWritecache: {},
|
||||
IOTagPolicer: {},
|
||||
IOTagTreeSync: {},
|
||||
IOTagWritecache: {},
|
||||
}
|
||||
for _, t := range configTags {
|
||||
tag, err := FromRawString(t.Tag)
|
||||
|
|
|
@@ -9,6 +9,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
 	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
 	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -410,11 +411,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 				},
 			),
 			WithNetmapSource(
-				&testNetmapSource{
-					netmaps: map[uint64]*netmap.NetMap{
+				&utilTesting.TestNetmapSource{
+					Netmaps: map[uint64]*netmap.NetMap{
 						curEpoch: currentEpochNM,
 					},
-					currentEpoch: curEpoch,
+					CurrentEpoch: curEpoch,
 				},
 			),
 			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@@ -483,12 +484,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 				},
 			),
 			WithNetmapSource(
-				&testNetmapSource{
-					netmaps: map[uint64]*netmap.NetMap{
+				&utilTesting.TestNetmapSource{
+					Netmaps: map[uint64]*netmap.NetMap{
 						curEpoch:     currentEpochNM,
 						curEpoch - 1: previousEpochNM,
 					},
-					currentEpoch: curEpoch,
+					CurrentEpoch: curEpoch,
 				},
 			),
 			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@@ -559,12 +560,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
 				},
 			),
 			WithNetmapSource(
-				&testNetmapSource{
-					netmaps: map[uint64]*netmap.NetMap{
+				&utilTesting.TestNetmapSource{
+					Netmaps: map[uint64]*netmap.NetMap{
 						curEpoch:     currentEpochNM,
 						curEpoch - 1: previousEpochNM,
 					},
-					currentEpoch: curEpoch,
+					CurrentEpoch: curEpoch,
 				},
 			),
 			WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@@ -596,26 +597,3 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container
 func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
 	return nil, nil
 }
-
-type testNetmapSource struct {
-	netmaps      map[uint64]*netmap.NetMap
-	currentEpoch uint64
-}
-
-func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
-	if diff >= s.currentEpoch {
-		return nil, fmt.Errorf("invalid diff")
-	}
-	return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
-}
-
-func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
-	if nm, found := s.netmaps[epoch]; found {
-		return nm, nil
-	}
-	return nil, fmt.Errorf("netmap not found")
-}
-
-func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) {
-	return s.currentEpoch, nil
-}

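Reviewer note: the local testNetmapSource test double is removed in favor of the shared helper imported as utilTesting (pkg/util/testing), which exposes the same data through exported fields. Constructing it, as the updated tests do:

	src := &utilTesting.TestNetmapSource{
		Netmaps: map[uint64]*netmap.NetMap{
			curEpoch: currentEpochNM,
		},
		CurrentEpoch: curEpoch,
	}
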
@@ -13,6 +13,13 @@ type ECInfo struct {
 	Total uint32
 }
 
+func (v *ECInfo) String() string {
+	if v == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total)
+}
+
 // Info groups object address with its FrostFS
 // object info.
 type Info struct {
@@ -23,5 +30,5 @@ type Info struct {
 }
 
 func (v Info) String() string {
-	return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject)
+	return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo)
 }

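Reviewer note: ECInfo.String handles a nil receiver, so Info.String can embed it unconditionally; objects without an EC header render as "<nil>". A small illustration (output shown per the Sprintf format above, parent ID value elided):

	var ec *ECInfo
	fmt.Println(ec.String()) // <nil>
	ec = &ECInfo{Index: 1, Total: 3}
	fmt.Println(ec.String()) // parent ID: ..., index: 1, total 3
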
@@ -50,7 +50,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
 
 	var err error
 	s.netmapProcessor, err = netmap.New(&netmap.Params{
-		Log: s.log,
+		Log: s.log.WithTag(logger.TagProcessor),
 		Metrics: s.irMetrics,
 		PoolSize: poolSize,
 		NetmapClient: netmap.NewNetmapClient(s.netmapClient),
@@ -159,7 +159,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
 	} else {
 		// create governance processor
 		governanceProcessor, err := governance.New(&governance.Params{
-			Log: s.log,
+			Log: s.log.WithTag(logger.TagProcessor),
 			Metrics: s.irMetrics,
 			FrostFSClient: frostfsCli,
 			AlphabetState: s,
@@ -225,7 +225,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er
 	// create alphabet processor
 	s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
 		ParsedWallets: parsedWallets,
-		Log: s.log,
+		Log: s.log.WithTag(logger.TagProcessor),
 		Metrics: s.irMetrics,
 		PoolSize: poolSize,
 		AlphabetContracts: s.contracts.alphabet,
@@ -247,7 +247,7 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c
 	s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
 	// container processor
 	containerProcessor, err := cont.New(&cont.Params{
-		Log: s.log,
+		Log: s.log.WithTag(logger.TagProcessor),
 		Metrics: s.irMetrics,
 		PoolSize: poolSize,
 		AlphabetState: s,
@@ -268,7 +268,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro
 	s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
 	// create balance processor
 	balanceProcessor, err := balance.New(&balance.Params{
-		Log: s.log,
+		Log: s.log.WithTag(logger.TagProcessor),
 		Metrics: s.irMetrics,
 		PoolSize: poolSize,
 		FrostFSClient: frostfsCli,
@@ -291,7 +291,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip
 	s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
 
 	frostfsProcessor, err := frostfs.New(&frostfs.Params{
-		Log: s.log,
+		Log: s.log.WithTag(logger.TagProcessor),
 		Metrics: s.irMetrics,
 		PoolSize: poolSize,
 		FrostFSContract: s.contracts.frostfs,
@@ -342,7 +342,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg
 
 	controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient,
 		controlsrv.WithAllowedKeys(authKeys),
-	), log, audit)
+	), log.WithTag(logger.TagGrpcSvc), audit)
 
 	grpcControlSrv := grpc.NewServer()
 	control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
@@ -458,7 +458,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 	}
 
 	morphChain := &chainParams{
-		log: s.log,
+		log: s.log.WithTag(logger.TagMorph),
 		cfg: cfg,
 		key: s.key,
 		name: morphPrefix,

@@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
 ) (*Server, error) {
 	var err error
 	server := &Server{
-		log: log,
+		log: log.WithTag(logger.TagIr),
 		irMetrics: metrics,
 		cmode: cmode,
 	}

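Reviewer note: the inner ring now derives a component-tagged logger per subsystem instead of reusing the bare server logger. The pattern, using only the calls visible in this diff (tag constants come from pkg/util/logger):

	procLog := s.log.WithTag(logger.TagProcessor) // netmap, governance, alphabet, container, balance, frostfs processors
	morphLog := s.log.WithTag(logger.TagMorph)    // morph chain client
	grpcLog := log.WithTag(logger.TagGrpcSvc)     // control gRPC service
	irLog := log.WithTag(logger.TagIr)            // the server itself
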
@@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string {
 	return b.path
 }
 
-// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
-type levelDbManager struct {
+// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
+type levelDBManager struct {
 	dbMtx     *sync.RWMutex
 	databases map[uint64]*sharedDB
@@ -157,8 +157,8 @@ type levelDbManager struct {
 
 func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
 	readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
-) *levelDbManager {
-	result := &levelDbManager{
+) *levelDBManager {
+	result := &levelDBManager{
 		databases: make(map[uint64]*sharedDB),
 		dbMtx:     &sync.RWMutex{},
@@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st
 	return result
 }
 
-func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
+func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
 	res := m.getDBIfExists(idx)
 	if res != nil {
 		return res
@@ -181,14 +181,14 @@ func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
 	return m.getOrCreateDB(idx)
 }
 
-func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB {
+func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
 	m.dbMtx.RLock()
 	defer m.dbMtx.RUnlock()
 
 	return m.databases[idx]
 }
 
-func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
+func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
 	m.dbMtx.Lock()
 	defer m.dbMtx.Unlock()
 
@@ -202,7 +202,7 @@ func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
 	return db
 }
 
-func (m *levelDbManager) hasAnyDB() bool {
+func (m *levelDBManager) hasAnyDB() bool {
 	m.dbMtx.RLock()
 	defer m.dbMtx.RUnlock()
 
@@ -213,7 +213,7 @@ func (m *levelDbManager) hasAnyDB() bool {
 //
 // The blobovnicza opens at the first request, closes after the last request.
 type dbManager struct {
-	levelToManager map[string]*levelDbManager
+	levelToManager map[string]*levelDBManager
 	levelToManagerGuard *sync.RWMutex
 	closedFlag *atomic.Bool
 	dbCounter *openDBCounter
@@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool,
 		options: options,
 		readOnly: readOnly,
 		metrics: metrics,
-		levelToManager: make(map[string]*levelDbManager),
+		levelToManager: make(map[string]*levelDBManager),
 		levelToManagerGuard: &sync.RWMutex{},
 		log: log,
 		closedFlag: &atomic.Bool{},
@@ -266,7 +266,7 @@ func (m *dbManager) Close() {
 	m.dbCounter.WaitUntilAllClosed()
 }
 
-func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
+func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
 	result := m.getLevelManagerIfExists(lvlPath)
 	if result != nil {
 		return result
@@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
 	return m.getOrCreateLevelManager(lvlPath)
 }
 
-func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
+func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
 	m.levelToManagerGuard.RLock()
 	defer m.levelToManagerGuard.RUnlock()
 
 	return m.levelToManager[lvlPath]
 }
 
-func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
+func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
 	m.levelToManagerGuard.Lock()
 	defer m.levelToManagerGuard.Unlock()
 

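Reviewer note: beyond the levelDbManager → levelDBManager rename, the manager keeps its read-mostly lazy-initialization scheme: look up under a read lock first, then take the write lock on the slow path (presumably re-checking before creating). A simplified sketch of that pattern; the lookup name, the re-check, and the reduced field set are illustrative, not code from this patch:

	// lookup condenses getLevelManager / getLevelManagerIfExists / getOrCreateLevelManager.
	func (m *dbManager) lookup(lvlPath string) *levelDBManager {
		m.levelToManagerGuard.RLock()
		lm := m.levelToManager[lvlPath]
		m.levelToManagerGuard.RUnlock()
		if lm != nil {
			return lm
		}

		m.levelToManagerGuard.Lock()
		defer m.levelToManagerGuard.Unlock()
		if lm := m.levelToManager[lvlPath]; lm != nil { // another goroutine may have created it
			return lm
		}
		lm = &levelDBManager{databases: make(map[uint64]*sharedDB), dbMtx: &sync.RWMutex{}}
		m.levelToManager[lvlPath] = lm
		return lm
	}
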
@@ -328,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo
 	return nil
 }
 
-func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) {
+func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
 	select {
 	case <-ctx.Done():
 		return false, ctx.Err()
@@ -341,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
 	b.dbFilesGuard.Lock()
 	defer b.dbFilesGuard.Unlock()
 
-	if err := shDb.CloseAndRemoveFile(ctx); err != nil {
+	if err := shDB.CloseAndRemoveFile(ctx); err != nil {
 		return false, err
 	}
 	b.commondbManager.CleanResources(path)

@@ -153,5 +153,5 @@ func WithMetrics(m Metrics) Option {
 }
 
 func (b *BlobStor) Compressor() *compression.Config {
-	return &b.cfg.compression
+	return &b.compression
 }

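Reviewer note: Compressor now returns the compression config stored directly on BlobStor rather than reaching through its cfg, which suggests the compression field was hoisted out of the cfg struct elsewhere in this change set; the accessor's signature and callers are unaffected.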
@@ -50,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
 
 	_, err := s.Iterate(context.Background(), iterPrm)
 	require.NoError(t, err)
-	require.Equal(t, len(objects), len(seen))
+	require.Len(t, objects, len(seen))
 	for i := range objects {
 		d, ok := seen[objects[i].addr.String()]
 		require.True(t, ok)

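Reviewer note: require.Len(t, objects, len(seen)) asserts the same length equality as the removed require.Equal, but on failure testify reports the objects collection and its actual length, which makes mismatches easier to diagnose.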
@@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
 	var csPrm shard.ContainerSizePrm
 	csPrm.SetContainerID(prm.cnr)
 
-	csRes, err := sh.Shard.ContainerSize(ctx, csPrm)
+	csRes, err := sh.ContainerSize(ctx, csPrm)
 	if err != nil {
 		e.reportShardError(ctx, sh, "can't get container size", err,
 			zap.Stringer("container_id", prm.cnr))
@@ -119,7 +119,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
 	uniqueIDs := make(map[string]cid.ID)
 
 	e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
-		res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
+		res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
 		if err != nil {
 			e.reportShardError(ctx, sh, "can't get list of containers", err)
 			return false

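Reviewer note: dropping the explicit .Shard step relies on Go promoting methods of an embedded field, assuming hashedShard embeds the shard type. The wrapper types below are a generic illustration of that rule, not the engine's actual definitions:

	type inner struct{}

	func (inner) ContainerSize() int { return 0 }

	type wrapper struct{ inner } // methods of inner are promoted onto wrapper

	func use(w wrapper) int {
		return w.ContainerSize() // equivalent to w.inner.ContainerSize()
	}
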
Some files were not shown because too many files have changed in this diff.