Compare commits

..

1 commit

Author SHA1 Message Date
84141702eb [#1596] metrics: Create public aliases for internal engine metrics
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 27s
DCO action / DCO (pull_request) Successful in 34s
Vulncheck / Vulncheck (pull_request) Successful in 53s
Build / Build Components (pull_request) Successful in 1m32s
Pre-commit hooks / Pre-commit (pull_request) Successful in 1m32s
Tests and linters / Staticcheck (pull_request) Successful in 2m0s
Tests and linters / Lint (pull_request) Successful in 2m48s
Tests and linters / Tests (pull_request) Successful in 3m0s
Tests and linters / gopls check (pull_request) Successful in 4m8s
Tests and linters / Tests with -race (pull_request) Successful in 4m14s
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2025-01-13 12:52:02 +03:00
492 changed files with 6557 additions and 10847 deletions

.ci/Jenkinsfile (vendored), 81 lines changed

@@ -1,81 +0,0 @@
def golang = ['1.23', '1.24']
def golangDefault = "golang:${golang.last()}"
async {
for (version in golang) {
def go = version
task("test/go${go}") {
container("golang:${go}") {
sh 'make test'
}
}
task("build/go${go}") {
container("golang:${go}") {
for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
sh """
make bin/frostfs-${app}
bin/frostfs-${app} --version
"""
}
}
}
}
task('test/race') {
container(golangDefault) {
sh 'make test GOFLAGS="-count=1 -race"'
}
}
task('lint') {
container(golangDefault) {
sh 'make lint-install lint'
}
}
task('staticcheck') {
container(golangDefault) {
sh 'make staticcheck-install staticcheck-run'
}
}
task('gopls') {
container(golangDefault) {
sh 'make gopls-install gopls-run'
}
}
task('gofumpt') {
container(golangDefault) {
sh '''
make fumpt-install
make fumpt
git diff --exit-code --quiet
'''
}
}
task('vulncheck') {
container(golangDefault) {
sh '''
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
'''
}
}
task('pre-commit') {
dockerfile("""
FROM ${golangDefault}
RUN apt update && \
apt install -y --no-install-recommends pre-commit
""") {
withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
sh 'pre-commit run --color=always --hook-stage=manual --all-files'
}
}
}
}


@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
     steps:
       - uses: actions/checkout@v3


@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.22'
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3


@@ -1,28 +0,0 @@
name: OCI image

on:
  push:
  workflow_dispatch:

jobs:
  image:
    name: Build container images
    runs-on: docker
    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
    steps:
      - name: Clone git repo
        uses: actions/checkout@v3

      - name: Build OCI image
        run: make images

      - name: Push image to OCI registry
        run: |
          echo "$REGISTRY_PASSWORD" \
            | docker login --username truecloudlab --password-stdin git.frostfs.info
          make push-images
        if: >-
          startsWith(github.ref, 'refs/tags/v') &&
          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
        env:
          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}


@@ -21,7 +21,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.24
+          go-version: 1.23
       - name: Set up Python
         run: |
           apt update


@@ -16,7 +16,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           cache: true

       - name: Install linters
@@ -30,7 +30,7 @@
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.23', '1.24' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -53,7 +53,7 @@
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.22'
           cache: true

       - name: Run tests
@@ -68,7 +68,7 @@
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           cache: true

       - name: Install staticcheck
@@ -104,7 +104,7 @@
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
+          go-version: '1.23'
           cache: true

       - name: Install gofumpt


@@ -18,8 +18,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.24'
-          check-latest: true
+          go-version: '1.23'
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest


@@ -1,63 +1,43 @@
-version: "2"
-run:
-  tests: false
-output:
-  formats:
-    tab:
-      path: stdout
-      colors: false
-linters:
-  default: none
-  enable:
-    - bidichk
-    - containedctx
-    - contextcheck
-    - copyloopvar
-    - durationcheck
-    - errcheck
-    - exhaustive
-    - funlen
-    - gocognit
-    - gocritic
-    - godot
-    - importas
-    - ineffassign
-    - intrange
-    - misspell
-    - perfsprint
-    - predeclared
-    - protogetter
-    - reassign
-    - revive
-    - staticcheck
-    - testifylint
-    - truecloudlab-linters
-    - unconvert
-    - unparam
-    - unused
-    - usetesting
-    - whitespace
-  settings:
-    exhaustive:
-      default-signifies-exhaustive: true
-    funlen:
-      lines: 80
-      statements: 60
-    gocognit:
-      min-complexity: 40
-    gocritic:
-      disabled-checks:
-        - ifElseChain
-    importas:
-      alias:
-        - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
-          alias: objectSDK
-      no-unaliased: true
-      no-extra-aliases: false
-    staticcheck:
-      checks:
-        - all
-        - -QF1002
-    unused:
-      field-writes-are-uses: false
-      exported-fields-are-used: false
+# This file contains all available configuration options
+# with their default values.
+
+# options for analysis running
+run:
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 20m
+
+  # include test files or not, default is true
+  tests: false
+
+# output configuration options
+output:
+  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
+  formats:
+    - format: tab
+
+# all available settings of specific linters
+linters-settings:
+  exhaustive:
+    # indicates that switch statements are to be considered exhaustive if a
+    # 'default' case is present, even if all enum members aren't listed in the
+    # switch
+    default-signifies-exhaustive: true
+  govet:
+    # report about shadowed variables
+    check-shadowing: false
+  staticcheck:
+    checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
+  funlen:
+    lines: 80 # default 60
+    statements: 60 # default 40
+  gocognit:
+    min-complexity: 40 # default 30
+  importas:
+    no-unaliased: true
+    no-extra-aliases: false
+    alias:
+      pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+      alias: objectSDK
+  unused:
+    field-writes-are-uses: false
+    exported-fields-are-used: false
@@ -68,40 +48,46 @@ linters:
-        original-url: git.frostfs.info/TrueCloudLab/linters.git
-        settings:
-          noliteral:
-            constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
-            disable-packages:
-              - codes
-              - err
-              - res
-              - exec
-            target-methods:
-              - reportFlushError
-              - reportError
-  exclusions:
-    generated: lax
-    presets:
-      - comments
-      - common-false-positives
-      - legacy
-      - std-error-handling
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
-formatters:
-  enable:
-    - gci
-    - gofmt
-    - goimports
-  settings:
-    gci:
-      sections:
-        - standard
-        - default
-      custom-order: true
-  exclusions:
-    generated: lax
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
+      original-url: git.frostfs.info/TrueCloudLab/linters.git
+      settings:
+        noliteral:
+          target-methods : ["reportFlushError", "reportError"]
+          disable-packages: ["codes", "err", "res","exec"]
+          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+
+linters:
+  enable:
+    # mandatory linters
+    - govet
+    - revive
+
+    # some default golangci-lint linters
+    - errcheck
+    - gosimple
+    - godot
+    - ineffassign
+    - staticcheck
+    - typecheck
+    - unused
+
+    # extra linters
+    - bidichk
+    - durationcheck
+    - exhaustive
+    - copyloopvar
+    - gofmt
+    - goimports
+    - misspell
+    - predeclared
+    - reassign
+    - whitespace
+    - containedctx
+    - funlen
+    - gocognit
+    - contextcheck
+    - importas
+    - truecloudlab-linters
+    - perfsprint
+    - testifylint
+    - protogetter
+    - intrange
+    - tenv
+  disable-all: true
+  fast: false


@@ -1,6 +1,5 @@
 #!/usr/bin/make -f
 SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -8,16 +7,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

-GO_VERSION ?= 1.23
-LINT_VERSION ?= 2.0.2
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.62.0
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
 PROTOC_OS_VERSION=osx-x86_64
 ifeq ($(shell uname), Linux)
 PROTOC_OS_VERSION=linux-x86_64
 endif
-STATICCHECK_VERSION ?= 2025.1.1
+STATICCHECK_VERSION ?= 2024.1.1

 ARCH = amd64
 BIN = bin
@@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0
 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
 GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)

-GOPLS_VERSION ?= v0.17.1
+GOPLS_VERSION ?= v0.15.1
 GOPLS_DIR ?= $(abspath $(BIN))/gopls
 GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
 GOPLS_TEMP_FILE := $(shell mktemp)
@@ -116,7 +115,7 @@ protoc:
 # Install protoc
 protoc-install:
     @rm -rf $(PROTOBUF_DIR)
-    @mkdir -p $(PROTOBUF_DIR)
+    @mkdir $(PROTOBUF_DIR)
     @echo "⇒ Installing protoc... "
     @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
     @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@@ -140,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm

-# Push FrostFS components' docker image to the registry
-push-image-%:
-    @echo "⇒ Publish FrostFS $* docker image "
-    @docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
 # Run `make %` in Golang container
 docker/%:
     docker run --rm -t \
@@ -170,7 +160,7 @@ imports:
 # Install gofumpt
 fumpt-install:
     @rm -rf $(GOFUMPT_DIR)
-    @mkdir -p $(GOFUMPT_DIR)
+    @mkdir $(GOFUMPT_DIR)
     @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)

 # Run gofumpt
@@ -187,44 +177,21 @@ test:
     @echo "⇒ Running go test"
     @GOFLAGS="$(GOFLAGS)" go test ./...

-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
-    @git config remote.review.url \
-        || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
-    @mkdir -p $(GIT_HOOK_DIR)/
-    @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
-    @chmod +x $(GIT_HOOK_DIR)/commit-msg
-    @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
-    @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
-    @git push review HEAD:refs/for/$(BRANCH) \
-        --push-option r=e.stratonikov@yadro.com \
-        --push-option r=d.stepanov@yadro.com \
-        --push-option r=an.nikiforov@yadro.com \
-        --push-option r=a.arifullin@yadro.com \
-        --push-option r=ekaterina.lebedeva@yadro.com \
-        --push-option r=a.savchuk@yadro.com \
-        --push-option r=a.chuprov@yadro.com
-
 # Run pre-commit
 pre-commit-run:
     @pre-commit run -a --hook-stage manual

 # Install linters
-lint-install: $(BIN)
+lint-install:
     @rm -rf $(OUTPUT_LINT_DIR)
-    @mkdir -p $(OUTPUT_LINT_DIR)
+    @mkdir $(OUTPUT_LINT_DIR)
     @mkdir -p $(TMP_DIR)
     @rm -rf $(TMP_DIR)/linters
     @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
     @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
     @rm -rf $(TMP_DIR)/linters
     @rmdir $(TMP_DIR) 2>/dev/null || true
-    @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
+    @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)

 # Run linters
 lint:
@@ -236,7 +203,7 @@ lint:
 # Install staticcheck
 staticcheck-install:
     @rm -rf $(STATICCHECK_DIR)
-    @mkdir -p $(STATICCHECK_DIR)
+    @mkdir $(STATICCHECK_DIR)
     @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)

 # Run staticcheck
@@ -249,7 +216,7 @@ staticcheck-run:
 # Install gopls
 gopls-install:
     @rm -rf $(GOPLS_DIR)
-    @mkdir -p $(GOPLS_DIR)
+    @mkdir $(GOPLS_DIR)
     @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)

 # Run gopls
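
The review-install / review targets removed in the hunk above only wrap a plain Gerrit push; as a rough usage sketch based solely on the deleted rules (branch name shown is the target's own default), the flow was:

    make review-install             # add the "review" remote and install the commit-msg hooks
    make review BRANCH=master       # push HEAD to refs/for/master with the reviewers listed as push options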


@@ -16,16 +16,9 @@ const (
     EndpointFlagDesc = "N3 RPC node endpoint"
     EndpointFlagShort = "r"

-    WalletPath = "wallet"
-    WalletPathShorthand = "w"
-    WalletPathUsage = "Path to the wallet"
-
     AlphabetWalletsFlag = "alphabet-wallets"
     AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"

-    AdminWalletPath = "wallet-admin"
-    AdminWalletUsage = "Path to the admin wallet"
-
     LocalDumpFlag = "local-dump"
     ProtoConfigPath = "protocol"
     ContractsInitFlag = "contracts"


@@ -1,15 +0,0 @@
package maintenance
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
"github.com/spf13/cobra"
)
var RootCmd = &cobra.Command{
Use: "maintenance",
Short: "Section for maintenance commands",
}
func init() {
RootCmd.AddCommand(zombie.Cmd)
}


@@ -1,70 +0,0 @@
package zombie
import (
"crypto/ecdsa"
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
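// getPrivateKey picks the signing key for the scan: an empty --wallet value falls
// back to the node key from the application config, a raw binary key file is used
// directly, and any other path is opened as a NEP-6 wallet (see fromWallet below).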
func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
keyDesc := viper.GetString(walletFlag)
if keyDesc == "" {
return &nodeconfig.Key(appCfg).PrivateKey
}
data, err := os.ReadFile(keyDesc)
commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
priv, err := keys.NewPrivateKeyFromBytes(data)
if err != nil {
w, err := wallet.NewWalletFromFile(keyDesc)
commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
return fromWallet(cmd, w, viper.GetString(addressFlag))
}
return &priv.PrivateKey
}
func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
var (
addr util.Uint160
err error
)
if addrStr == "" {
addr = w.GetChangeAddress()
} else {
addr, err = flags.ParseAddress(addrStr)
commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
}
acc := w.GetAccount(addr)
if acc == nil {
commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
}
pass, err := getPassword()
commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
return &acc.PrivateKey().PrivateKey
}
func getPassword() (string, error) {
// this check allows empty passwords
if viper.IsSet("password") {
return viper.GetString("password"), nil
}
return input.ReadPassword("Enter password > ")
}


@@ -1,31 +0,0 @@
package zombie
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func list(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
storageEngine := newEngine(cmd, appCfg)
q := createQuarantine(cmd, storageEngine.DumpInfo())
var containerID *cid.ID
if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
containerID = &cid.ID{}
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
}
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
if containerID != nil && a.Container() != *containerID {
return nil
}
cmd.Println(a.EncodeToString())
return nil
}))
}


@@ -1,46 +0,0 @@
package zombie
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/spf13/cobra"
)
func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
addresses := morphconfig.RPCEndpoint(appCfg)
if len(addresses) == 0 {
commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
}
key := nodeconfig.Key(appCfg)
cli, err := client.New(cmd.Context(),
key,
client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
client.WithEndpoints(addresses...),
client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
)
commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
return cli
}
func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
cc, err := cntClient.NewFromMorph(morph, hs, 0)
commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
return cc
}
func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
cli, err := netmapClient.NewFromMorph(morph, hs, 0)
commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
return cli
}


@@ -1,154 +0,0 @@
package zombie
import (
"context"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"sync"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
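// quarantine keeps objects that were pulled out of the storage engine in one FSTree
// per shard, created under <common shard path>/quarantine. Put spreads objects over
// the trees round-robin; Get, Delete and Iterate try every tree.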
type quarantine struct {
// mtx protects current field.
mtx sync.Mutex
current int
trees []*fstree.FSTree
}
func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
var paths []string
for _, sh := range engineInfo.Shards {
var storagePaths []string
for _, st := range sh.BlobStorInfo.SubStorages {
storagePaths = append(storagePaths, st.Path)
}
if len(storagePaths) == 0 {
continue
}
paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
}
q, err := newQuarantine(paths)
commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
return q
}
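// commonPath returns the longest common leading byte prefix of the given paths (it
// is not path-component aware). For example, for the hypothetical sub-storage paths
// /srv/shard0/blobovnicza and /srv/shard0/fstree it returns /srv/shard0/, so the
// quarantine tree above ends up at /srv/shard0/quarantine.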
func commonPath(paths []string) string {
if len(paths) == 0 {
return ""
}
if len(paths) == 1 {
return paths[0]
}
minLen := math.MaxInt
for _, p := range paths {
if len(p) < minLen {
minLen = len(p)
}
}
var sb strings.Builder
for i := range minLen {
for _, path := range paths[1:] {
if paths[0][i] != path[i] {
return sb.String()
}
}
sb.WriteByte(paths[0][i])
}
return sb.String()
}
func newQuarantine(paths []string) (*quarantine, error) {
var q quarantine
for i := range paths {
f := fstree.New(
fstree.WithDepth(1),
fstree.WithDirNameLen(1),
fstree.WithPath(paths[i]),
fstree.WithPerm(os.ModePerm),
)
if err := f.Open(mode.ComponentReadWrite); err != nil {
return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
}
if err := f.Init(); err != nil {
return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
}
q.trees = append(q.trees, f)
}
return &q, nil
}
func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
for i := range q.trees {
res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
if err != nil {
continue
}
return res.Object, nil
}
return nil, &apistatus.ObjectNotFound{}
}
func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
for i := range q.trees {
_, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
if err != nil {
continue
}
return nil
}
return &apistatus.ObjectNotFound{}
}
func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
data, err := obj.Marshal()
if err != nil {
return err
}
var prm common.PutPrm
prm.Address = objectcore.AddressOf(obj)
prm.Object = obj
prm.RawData = data
q.mtx.Lock()
current := q.current
q.current = (q.current + 1) % len(q.trees)
q.mtx.Unlock()
_, err = q.trees[current].Put(ctx, prm)
return err
}
func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
var prm common.IteratePrm
prm.Handler = func(elem common.IterationElement) error {
return f(elem.Address)
}
for i := range q.trees {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
_, err := q.trees[i].Iterate(ctx, prm)
if err != nil {
return err
}
}
return nil
}


@@ -1,55 +0,0 @@
package zombie
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func remove(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
storageEngine := newEngine(cmd, appCfg)
q := createQuarantine(cmd, storageEngine.DumpInfo())
var containerID cid.ID
cidStr, _ := cmd.Flags().GetString(cidFlag)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
var objectID *oid.ID
oidStr, _ := cmd.Flags().GetString(oidFlag)
if oidStr != "" {
objectID = &oid.ID{}
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
}
if objectID != nil {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(*objectID)
removeObject(cmd, q, addr)
} else {
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
if addr.Container() != containerID {
return nil
}
removeObject(cmd, q, addr)
return nil
}))
}
}
func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
err := q.Delete(cmd.Context(), addr)
if errors.Is(err, new(apistatus.ObjectNotFound)) {
return
}
commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
}


@@ -1,69 +0,0 @@
package zombie
import (
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func restore(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
storageEngine := newEngine(cmd, appCfg)
q := createQuarantine(cmd, storageEngine.DumpInfo())
morphClient := createMorphClient(cmd, appCfg)
cnrCli := createContainerClient(cmd, morphClient)
var containerID cid.ID
cidStr, _ := cmd.Flags().GetString(cidFlag)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
var objectID *oid.ID
oidStr, _ := cmd.Flags().GetString(oidFlag)
if oidStr != "" {
objectID = &oid.ID{}
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
}
if objectID != nil {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(*objectID)
restoreObject(cmd, storageEngine, q, addr, cnrCli)
} else {
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
if addr.Container() != containerID {
return nil
}
restoreObject(cmd, storageEngine, q, addr, cnrCli)
return nil
}))
}
}
func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
obj, err := q.Get(cmd.Context(), addr)
commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
rawCID := make([]byte, sha256.Size)
cid := addr.Container()
cid.Encode(rawCID)
cnr, err := cnrCli.Get(cmd.Context(), rawCID)
commonCmd.ExitOnErr(cmd, "get container: %w", err)
putPrm := engine.PutPrm{
Object: obj,
IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
}
commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
}


@@ -1,123 +0,0 @@
package zombie
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
flagBatchSize = "batch-size"
flagBatchSizeUsage = "Objects iteration batch size"
cidFlag = "cid"
cidFlagUsage = "Container ID"
oidFlag = "oid"
oidFlagUsage = "Object ID"
walletFlag = "wallet"
walletFlagShorthand = "w"
walletFlagUsage = "Path to the wallet or binary key"
addressFlag = "address"
addressFlagUsage = "Address of wallet account"
moveFlag = "move"
moveFlagUsage = "Move objects from storage engine to quarantine"
)
var (
Cmd = &cobra.Command{
Use: "zombie",
Short: "Zombie objects related commands",
}
scanCmd = &cobra.Command{
Use: "scan",
Short: "Scan storage engine for zombie objects and move them to quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
_ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
_ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
_ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
},
Run: scan,
}
listCmd = &cobra.Command{
Use: "list",
Short: "List zombie objects from quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
},
Run: list,
}
restoreCmd = &cobra.Command{
Use: "restore",
Short: "Restore zombie objects from quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
},
Run: restore,
}
removeCmd = &cobra.Command{
Use: "remove",
Short: "Remove zombie objects from quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
},
Run: remove,
}
)
func init() {
initScanCmd()
initListCmd()
initRestoreCmd()
initRemoveCmd()
}
func initScanCmd() {
Cmd.AddCommand(scanCmd)
scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
}
func initListCmd() {
Cmd.AddCommand(listCmd)
listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
listCmd.Flags().String(cidFlag, "", cidFlagUsage)
}
func initRestoreCmd() {
Cmd.AddCommand(restoreCmd)
restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
}
func initRemoveCmd() {
Cmd.AddCommand(removeCmd)
removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
}
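
Taken together, the deleted files above wire these commands under the maintenance section of frostfs-adm; a rough usage sketch, assuming the common --config flag spelling from commonflags and hypothetical paths and IDs:

    frostfs-adm maintenance zombie scan --config /etc/frostfs/storage.yml --move
    frostfs-adm maintenance zombie list --config /etc/frostfs/storage.yml --cid <container-id>
    frostfs-adm maintenance zombie restore --config /etc/frostfs/storage.yml --cid <container-id> --oid <object-id>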


@@ -1,281 +0,0 @@
package zombie
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"errors"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
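// scan walks the local storage engine in batches (ListWithCursor), asks the other
// container/netmap nodes whether each object still exists (checkAddr) and, when the
// --move flag is set, shifts the objects nobody else holds into quarantine;
// otherwise it only prints their addresses.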
func scan(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
if batchSize == 0 {
commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
}
move, _ := cmd.Flags().GetBool(moveFlag)
storageEngine := newEngine(cmd, appCfg)
morphClient := createMorphClient(cmd, appCfg)
cnrCli := createContainerClient(cmd, morphClient)
nmCli := createNetmapClient(cmd, morphClient)
q := createQuarantine(cmd, storageEngine.DumpInfo())
pk := getPrivateKey(cmd, appCfg)
epoch, err := nmCli.Epoch(cmd.Context())
commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
cmd.Printf("Epoch: %d\n", nm.Epoch())
cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
ps := &processStatus{
statusCount: make(map[status]uint64),
}
stopCh := make(chan struct{})
start := time.Now()
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case <-cmd.Context().Done():
return
case <-stopCh:
return
case <-tick.C:
fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
}
}
}()
go func() {
defer wg.Done()
err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
close(stopCh)
}()
wg.Wait()
commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
cmd.Println()
cmd.Println("Status description:")
cmd.Println("undefined -- nothing is clear")
cmd.Println("found -- object is found in cluster")
cmd.Println("quarantine -- object is not found in cluster")
cmd.Println()
for status, count := range ps.statusCount {
cmd.Printf("Status: %s, Count: %d\n", status, count)
}
}
type status string
const (
statusUndefined status = "undefined"
statusFound status = "found"
statusQuarantine status = "quarantine"
)
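// checkAddr classifies a single object: statusFound if any container (or fallback
// netmap) node answers HEAD for it, if its container is already gone, or if the
// placement policy keeps only a single replica; statusQuarantine if nobody reports
// it; statusUndefined if the check itself failed.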
func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
rawCID := make([]byte, sha256.Size)
cid := obj.Address.Container()
cid.Encode(rawCID)
cnr, err := cnrCli.Get(ctx, rawCID)
if err != nil {
var errContainerNotFound *apistatus.ContainerNotFound
if errors.As(err, &errContainerNotFound) {
// Policer will deal with this object.
return statusFound, nil
}
return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
}
nm, err := nmCli.NetMap(ctx)
if err != nil {
return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
}
nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
if err != nil {
// Not enough nodes, check all netmap nodes.
nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
}
objID := obj.Address.Object()
cnrID := obj.Address.Container()
local := true
raw := false
if obj.ECInfo != nil {
objID = obj.ECInfo.ParentID
local = false
raw = true
}
prm := clientSDK.PrmObjectHead{
ObjectID: &objID,
ContainerID: &cnrID,
Local: local,
Raw: raw,
}
var ni clientCore.NodeInfo
for i := range nodes {
for j := range nodes[i] {
if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
return statusUndefined, fmt.Errorf("parse node info: %w", err)
}
c, err := cc.Get(ni)
if err != nil {
continue
}
res, err := c.ObjectHead(ctx, prm)
if err != nil {
var errECInfo *objectSDK.ECInfoError
if raw && errors.As(err, &errECInfo) {
return statusFound, nil
}
continue
}
if err := apistatus.ErrFromStatus(res.Status()); err != nil {
continue
}
return statusFound, nil
}
}
if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
return statusFound, nil
}
return statusQuarantine, nil
}
func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
) error {
cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
Key: pk,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
})
ctx := cmd.Context()
var cursor *engine.Cursor
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
var prm engine.ListWithCursorPrm
prm.WithCursor(cursor)
prm.WithCount(batchSize)
res, err := storageEngine.ListWithCursor(ctx, prm)
if err != nil {
if errors.Is(err, engine.ErrEndOfListing) {
return nil
}
return fmt.Errorf("list with cursor: %w", err)
}
cursor = res.Cursor()
addrList := res.AddressList()
eg, egCtx := errgroup.WithContext(ctx)
eg.SetLimit(int(batchSize))
for i := range addrList {
addr := addrList[i]
eg.Go(func() error {
result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
if err != nil {
return fmt.Errorf("check object %s status: %w", addr.Address, err)
}
ps.add(result)
if !move && result == statusQuarantine {
cmd.Println(addr)
return nil
}
if result == statusQuarantine {
return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
}
return nil
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("process objects batch: %w", err)
}
}
}
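// moveToQuarantine copies the object into the quarantine tree first and only then
// force-removes it from the storage engine, so a failure between the two steps
// leaves the object in place rather than losing it.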
func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
var getPrm engine.GetPrm
getPrm.WithAddress(addr)
res, err := storageEngine.Get(ctx, getPrm)
if err != nil {
return fmt.Errorf("get object %s from storage engine: %w", addr, err)
}
if err := q.Put(ctx, res.Object()); err != nil {
return fmt.Errorf("put object %s to quarantine: %w", addr, err)
}
var delPrm engine.DeletePrm
delPrm.WithForceRemoval()
delPrm.WithAddress(addr)
if err = storageEngine.Delete(ctx, delPrm); err != nil {
return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
}
return nil
}
type processStatus struct {
guard sync.RWMutex
statusCount map[status]uint64
count uint64
}
func (s *processStatus) add(st status) {
s.guard.Lock()
defer s.guard.Unlock()
s.statusCount[st]++
s.count++
}
func (s *processStatus) total() uint64 {
s.guard.RLock()
defer s.guard.RUnlock()
return s.count
}


@@ -1,201 +0,0 @@
package zombie
import (
"context"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/panjf2000/ants/v2"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
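// newEngine assembles a fully initialized storage engine from the node configuration,
// mirroring the shard, blobstor, metabase and write-cache options used by
// frostfs-node, but with nop loggers and a noop QoS limiter since this runs as an
// offline administrative command.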
func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
ngOpts := storageEngineOptions(c)
shardOpts := shardOptions(cmd, c)
e := engine.New(ngOpts...)
for _, opts := range shardOpts {
_, err := e.AddShard(cmd.Context(), opts...)
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
}
commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
return e
}
func storageEngineOptions(c *config.Config) []engine.Option {
return []engine.Option{
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
}
}
func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
var result [][]shard.Option
err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
result = append(result, getShardOpts(cmd, c, sh))
return nil
})
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
return result
}
func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
wc, wcEnabled := getWriteCacheOpts(sh)
return []shard.Option{
shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
shard.WithRefillMetabase(sh.RefillMetabase()),
shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
shard.WithMode(sh.Mode()),
shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
shard.WithWriteCache(wcEnabled),
shard.WithWriteCacheOptions(wc),
shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
pool, err := ants.NewPool(sz)
commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
return pool
}),
shard.WithLimiter(qos.NewNoopLimiter()),
}
}
func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
var result []writecache.Option
result = append(result,
writecache.WithPath(wc.Path()),
writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkerCount()),
writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithMaxCacheCount(wc.CountLimit()),
writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
writecache.WithQoSLimiter(qos.NewNoopLimiter()),
)
return result, true
}
return nil, false
}
func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
var piloramaOpts []pilorama.Option
if config.BoolSafe(c.Sub("tree"), "enabled") {
pr := sh.Pilorama()
piloramaOpts = append(piloramaOpts,
pilorama.WithPath(pr.Path()),
pilorama.WithPerm(pr.Perm()),
pilorama.WithNoSync(pr.NoSync()),
pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
)
}
return piloramaOpts
}
func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
return []meta.Option{
meta.WithPath(sh.Metabase().Path()),
meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
meta.WithBoltDBOptions(&bbolt.Options{
Timeout: 100 * time.Millisecond,
}),
meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
meta.WithEpochState(&epochState{}),
}
}
func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
result := []blobstor.Option{
blobstor.WithCompression(sh.Compression()),
blobstor.WithStorages(getSubStorages(ctx, sh)),
blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
}
return result
}
func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
var ss []blobstor.SubStorage
for _, storage := range sh.BlobStor().Storages() {
switch storage.Type() {
case blobovniczatree.Type:
sub := blobovniczaconfig.From((*config.Config)(storage))
blobTreeOpts := []blobovniczatree.Option{
blobovniczatree.WithRootPath(storage.Path()),
blobovniczatree.WithPermissions(storage.Perm()),
blobovniczatree.WithBlobovniczaSize(sub.Size()),
blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())),
blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())),
blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
}
ss = append(ss, blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < sh.SmallSizeLimit()
},
})
case fstree.Type:
sub := fstreeconfig.From((*config.Config)(storage))
fstreeOpts := []fstree.Option{
fstree.WithPath(storage.Path()),
fstree.WithPerm(storage.Perm()),
fstree.WithDepth(sub.Depth()),
fstree.WithNoSync(sub.NoSync()),
fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
}
ss = append(ss, blobstor.SubStorage{
Storage: fstree.New(fstreeOpts...),
Policy: func(_ *objectSDK.Object, _ []byte) bool {
return true
},
})
default:
// should never happen, that has already
// been handled: when the config was read
}
}
return ss
}
type epochState struct{}
func (epochState) CurrentEpoch() uint64 {
return 0
}


@@ -28,7 +28,6 @@ const (
 var (
     errNoPathsFound = errors.New("no metabase paths found")
     errNoMorphEndpointsFound = errors.New("no morph endpoints found")
-    errUpgradeFailed = errors.New("upgrade failed")
 )

 var UpgradeCmd = &cobra.Command{
@@ -92,20 +91,15 @@ func upgrade(cmd *cobra.Command, _ []string) error {
     if err := eg.Wait(); err != nil {
         return err
     }
-    allSuccess := true
     for mb, ok := range result {
         if ok {
             cmd.Println(mb, ": success")
         } else {
             cmd.Println(mb, ": failed")
-            allSuccess = false
         }
     }
-    if allSuccess {
-        return nil
-    }
-    return errUpgradeFailed
+    return nil
 }

 func getMetabasePaths(appCfg *config.Config) ([]string, error) {
     var paths []string
View file

@@ -3,8 +3,6 @@ package ape
 import (
     "errors"

-    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
     commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@@ -78,8 +76,7 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he
     c, err := helper.NewRemoteClient(viper.GetViper())
     commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)

-    walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
-    ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
+    ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)
     commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)

     var ch util.Uint160


@@ -9,7 +9,6 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
     "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
     "github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
             helper.GetAlphabetNNSDomain(i),
             int64(nns.TXT))
     }
-    assert.NoError(w.Err)
+    if w.Err != nil {
+        panic(w.Err)
+    }

     alphaRes, err := c.InvokeScript(w.Bytes(), nil)
     if err != nil {
@@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
     for i := range accounts {
         emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
     }
-    assert.NoError(w.Err)
+    if w.Err != nil {
+        panic(w.Err)
+    }

     res, err := c.Run(w.Bytes())
     if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {


@@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
         netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
         netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
             nbuf := make([]byte, 8)
-            copy(nbuf, v)
+            copy(nbuf[:], v)
             n := binary.LittleEndian.Uint64(nbuf)
-            _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
+            _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
         case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
             if len(v) == 0 || len(v) > 1 {
                 return helper.InvalidConfigValueErr(k)
             }
-            _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
+            _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
         default:
-            _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
+            _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
         }
     }


@@ -10,7 +10,6 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     "github.com/nspcc-dev/neo-go/pkg/crypto/hash"
     "github.com/nspcc-dev/neo-go/pkg/io"
@@ -236,7 +235,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
         putContainer(bw, ch, cnt)

-        assert.NoError(bw.Err)
+        if bw.Err != nil {
+            panic(bw.Err)
+        }

         if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
             return err


@@ -10,7 +10,6 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
     "github.com/nspcc-dev/neo-go/cli/cmdargs"
     "github.com/nspcc-dev/neo-go/pkg/core/state"
     "github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
         }
     }

-    assert.NoError(writer.Err, "can't create deployment script")
+    if writer.Err != nil {
+        panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+    }

     if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
         return err
@@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
             domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
     }

-    assert.NoError(bw.Err, "can't create deployment script")
-    if bw.Len() != start {
+    if bw.Err != nil {
+        panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+    } else if bw.Len() != start {
         writer.WriteBytes(bw.Bytes())
         emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
         emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)


@@ -11,7 +11,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@@ -220,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
 		if info.version == "" {
 			info.version = "unknown"
 		}
-		_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
-			info.name, info.version, info.hash.StringLE()))
+		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
+			info.name, info.version, info.hash.StringLE())))
 	}
 	_ = tw.Flush()
@@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
 		} else {
 			sub.Reset()
 			emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
-			assert.NoError(sub.Err, "can't create version script")
+			if sub.Err != nil {
+				panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+			}
 			script := sub.Bytes()
 			emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
-			bw.WriteBytes(script)
+			bw.BinWriter.WriteBytes(script)
 			emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
 			emit.Opcodes(bw.BinWriter, opcode.PUSH0)
 		}
 	}
 	emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
-	assert.NoError(bw.Err, "can't create version script")
+	if bw.Err != nil {
+		panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+	}
 	res, err := c.InvokeScript(bw.Bytes(), nil)
 	if err != nil {
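For readers following the TRY/ENDTRY arithmetic in this hunk: each contract's "version" call is wrapped so that a contract without such a method leaves a placeholder 0 on the stack instead of faulting the whole script. A rough, illustrative reconstruction of that script builder, assuming the usual neo-go emit/io/opcode packages (versionProbeScript is a hypothetical name, and the offsets simply mirror the hunk above):

```go
package sketch

import (
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)

// versionProbeScript emits one guarded "version" call per contract hash.
func versionProbeScript(hashes []util.Uint160) ([]byte, error) {
	bw := io.NewBufBinWriter()
	sub := io.NewBufBinWriter()
	for _, h := range hashes {
		sub.Reset()
		emit.AppCall(sub.BinWriter, h, "version", callflag.NoneFlag)
		if sub.Err != nil {
			return nil, sub.Err
		}
		script := sub.Bytes()
		// Catch target sits just past the wrapped call:
		// 3 bytes of TRY, the call itself, 2 bytes of ENDTRY.
		emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
		bw.BinWriter.WriteBytes(script)
		emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
		emit.Opcodes(bw.BinWriter, opcode.PUSH0) // placeholder result on fault
	}
	emit.Opcodes(bw.BinWriter, opcode.NOP) // target for the last ENDTRY
	return bw.Bytes(), bw.Err
}
```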


@@ -1,7 +1,6 @@
 package frostfsid
 
 import (
-	"errors"
 	"fmt"
 	"math/big"
 	"sort"
@@ -34,16 +33,11 @@ const (
 	subjectNameFlag    = "subject-name"
 	subjectKeyFlag     = "subject-key"
 	subjectAddressFlag = "subject-address"
-	extendedFlag       = "extended"
+	includeNamesFlag   = "include-names"
 	groupNameFlag      = "group-name"
 	groupIDFlag        = "group-id"
 	rootNamespacePlaceholder = "<root>"
-	keyFlag       = "key"
-	keyDescFlag   = "Key for storing a value in the subject's KV storage"
-	valueFlag     = "value"
-	valueDescFlag = "Value to be stored in the subject's KV storage"
 )
 
 var (
@@ -157,23 +151,6 @@ var (
 		},
 		Run: frostfsidListGroupSubjects,
 	}
-
-	frostfsidSetKVCmd = &cobra.Command{
-		Use:   "set-kv",
-		Short: "Store a key-value pair in the subject's KV storage",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: frostfsidSetKV,
-	}
-
-	frostfsidDeleteKVCmd = &cobra.Command{
-		Use:   "delete-kv",
-		Short: "Delete a value from the subject's KV storage",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: frostfsidDeleteKV,
-	}
 )
 func initFrostfsIDCreateNamespaceCmd() {
@@ -209,7 +186,7 @@ func initFrostfsIDListSubjectsCmd() {
 	Cmd.AddCommand(frostfsidListSubjectsCmd)
 	frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
-	frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+	frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }
 
 func initFrostfsIDCreateGroupCmd() {
@@ -256,22 +233,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
 	frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
-	frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
+	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }
-
-func initFrostfsIDSetKVCmd() {
-	Cmd.AddCommand(frostfsidSetKVCmd)
-	frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
-	frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
-	frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
-}
-
-func initFrostfsIDDeleteKVCmd() {
-	Cmd.AddCommand(frostfsidDeleteKVCmd)
-	frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
-	frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
-}
 func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
@@ -291,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
 	reader := frostfsidrpclient.NewReader(inv, hash)
 	sessionID, it, err := reader.ListNamespaces()
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 	namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -336,32 +298,34 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
 }
 
 func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
-	extended, _ := cmd.Flags().GetBool(extendedFlag)
+	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
 	ns := getFrostfsIDNamespace(cmd)
 	inv, _, hash := initInvoker(cmd)
 	reader := frostfsidrpclient.NewReader(inv, hash)
 	sessionID, it, err := reader.ListNamespaceSubjects(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
+	subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
 	commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
 	sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
 	for _, addr := range subAddresses {
-		if !extended {
+		if !includeNames {
 			cmd.Println(address.Uint160ToString(addr))
 			continue
 		}
-		items, err := reader.GetSubject(addr)
+		sessionID, it, err := reader.ListSubjects()
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
+		items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
+		commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
-		printSubjectInfo(cmd, addr, subj)
-		cmd.Println()
+		cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
 	}
 }
@@ -401,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListGroups(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
 	groups, err := frostfsidclient.ParseGroups(items)
 	commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@@ -439,49 +403,10 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
 	commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
 }
-
-func frostfsidSetKV(cmd *cobra.Command, _ []string) {
-	subjectAddress := getFrostfsIDSubjectAddress(cmd)
-	key, _ := cmd.Flags().GetString(keyFlag)
-	value, _ := cmd.Flags().GetString(valueFlag)
-	if key == "" {
-		commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
-	}
-	ffsid, err := newFrostfsIDClient(cmd)
-	commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-	method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
-	ffsid.addCall(method, args)
-	err = ffsid.sendWait()
-	commonCmd.ExitOnErr(cmd, "set KV: %w", err)
-}
-
-func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
-	subjectAddress := getFrostfsIDSubjectAddress(cmd)
-	key, _ := cmd.Flags().GetString(keyFlag)
-	if key == "" {
-		commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
-	}
-	ffsid, err := newFrostfsIDClient(cmd)
-	commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-	method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
-	ffsid.addCall(method, args)
-	err = ffsid.sendWait()
-	commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
-}
 func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	ns := getFrostfsIDNamespace(cmd)
 	groupID := getFrostfsIDGroupID(cmd)
-	extended, _ := cmd.Flags().GetBool(extendedFlag)
+	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
 	inv, cs, hash := initInvoker(cmd)
 	_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
 	commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@@ -490,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
 	commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 	subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@@ -499,7 +424,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
 	for _, subjAddr := range subjects {
-		if !extended {
+		if !includeNames {
 			cmd.Println(address.Uint160ToString(subjAddr))
 			continue
 		}
@@ -508,8 +433,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
 		subj, err := frostfsidclient.ParseSubject(items)
 		commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
-		printSubjectInfo(cmd, subjAddr, subj)
-		cmd.Println()
+		cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
 	}
 }
@@ -568,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
 	return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
 }
 
-func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
+func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
 	var shouldStop bool
 	res := make([]stackitem.Item, 0)
 	for !shouldStop {
-		items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
+		items, err := inv.TraverseIterator(sessionID, iter, batchSize)
 		if err != nil {
 			return nil, err
 		}
 		res = append(res, items...)
-		shouldStop = len(items) < iteratorBatchSize
+		shouldStop = len(items) < batchSize
 	}
 	return res, nil
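The readIterator change above only threads the batch size through as a parameter; the traversal pattern itself stays the same. A hedged sketch of that pattern in isolation (drainIterator is an illustrative name, not repository code):

```go
package sketch

import (
	"github.com/google/uuid"
	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// drainIterator pages through a server-side iterator session until a page
// comes back shorter than the requested batch size.
func drainIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID, batchSize int) ([]stackitem.Item, error) {
	var out []stackitem.Item
	for {
		page, err := inv.TraverseIterator(sessionID, iter, batchSize)
		if err != nil {
			return nil, err
		}
		out = append(out, page...)
		if len(page) < batchSize { // short page: iterator exhausted
			return out, nil
		}
	}
}
```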
@@ -599,30 +523,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui
 	return inv, cs, nmHash
 }
-
-func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
-	cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
-	pk := "<nil>"
-	if subj.PrimaryKey != nil {
-		pk = subj.PrimaryKey.String()
-	}
-	cmd.Printf("Primary key: %s\n", pk)
-	cmd.Printf("Name: %s\n", subj.Name)
-	cmd.Printf("Namespace: %s\n", subj.Namespace)
-	if len(subj.AdditionalKeys) > 0 {
-		cmd.Printf("Additional keys:\n")
-		for _, key := range subj.AdditionalKeys {
-			k := "<nil>"
-			if key != nil {
-				k = key.String()
-			}
-			cmd.Printf("- %s\n", k)
-		}
-	}
-	if len(subj.KV) > 0 {
-		cmd.Printf("KV:\n")
-		for k, v := range subj.KV {
-			cmd.Printf("- %s: %s\n", k, v)
-		}
-	}
-}


@@ -12,8 +12,6 @@ func init() {
 	initFrostfsIDAddSubjectToGroupCmd()
 	initFrostfsIDRemoveSubjectFromGroupCmd()
 	initFrostfsIDListGroupSubjectsCmd()
-	initFrostfsIDSetKVCmd()
-	initFrostfsIDDeleteKVCmd()
 	initFrostfsIDAddSubjectKeyCmd()
 	initFrostfsIDRemoveSubjectKeyCmd()
 }


@ -3,6 +3,9 @@ package helper
import ( import (
"fmt" "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/core/transaction"
@ -13,6 +16,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
@ -24,86 +28,32 @@ type LocalActor struct {
rpcInvoker invoker.RPCInvoke rpcInvoker invoker.RPCInvoke
} }
type AlphabetWallets struct {
Label string
Path string
}
func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
w, err := GetAlphabetWallets(v, a.Path)
if err != nil {
return nil, err
}
var accounts []*wallet.Account
for _, wall := range w {
acc, err := GetWalletAccount(wall, a.Label)
if err != nil {
return nil, err
}
accounts = append(accounts, acc)
}
return accounts, nil
}
type RegularWallets struct{ Path string }
func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
w, err := getRegularWallet(r.Path)
if err != nil {
return nil, err
}
return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
}
// NewLocalActor create LocalActor with accounts form provided wallets. // NewLocalActor create LocalActor with accounts form provided wallets.
// In case of empty wallets provided created actor with dummy account only for read operation. // In case of empty wallets provided created actor with dummy account only for read operation.
// //
// If wallets are provided, the contract client will use accounts with accName name from these wallets. // If wallets are provided, the contract client will use accounts with accName name from these wallets.
// To determine which account name should be used in a contract client, refer to how the contract // To determine which account name should be used in a contract client, refer to how the contract
// verifies the transaction signature. // verifies the transaction signature.
func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) {
walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
var act *actor.Actor var act *actor.Actor
var accounts []*wallet.Account var accounts []*wallet.Account
var signers []actor.SignerAccount
if alphabet != nil { wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
account, err := alphabet.GetAccount(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
if err != nil {
return nil, err for _, w := range wallets {
acc, err := GetWalletAccount(w, accName)
commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
accounts = append(accounts, acc)
} }
act, err = actor.New(c, []actor.SignerAccount{{
accounts = append(accounts, account...)
signers = append(signers, actor.SignerAccount{
Signer: transaction.Signer{ Signer: transaction.Signer{
Account: account[0].Contract.ScriptHash(), Account: accounts[0].Contract.ScriptHash(),
Scopes: transaction.Global, Scopes: transaction.Global,
}, },
Account: account[0], Account: accounts[0],
}) }})
}
for _, w := range regularWallets {
if w == nil {
continue
}
account, err := w.GetAccount()
if err != nil {
return nil, err
}
accounts = append(accounts, account...)
signers = append(signers, actor.SignerAccount{
Signer: transaction.Signer{
Account: account[0].Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: account[0],
})
}
act, err := actor.New(c, signers)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
-	nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -14,7 +13,9 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+	nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
 	"github.com/nspcc-dev/neo-go/pkg/util"
@@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
 }
 
 func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
-	inv := invoker.New(c, nil)
-	reader := nns2.NewReader(inv, nnsHash)
-	return reader.IsAvailable(name)
+	switch c.(type) {
+	case *rpcclient.Client:
+		inv := invoker.New(c, nil)
+		reader := nns2.NewReader(inv, nnsHash)
+		return reader.IsAvailable(name)
+	default:
+		b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
+		if err != nil {
+			return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
+		}
+		return b, nil
+	}
 }
 
 func CheckNotaryEnabled(c Client) error {
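The default branch added above goes through the generic invoke path instead of the typed NNS reader. A minimal sketch of the same check done purely with an invoker and unwrap (nnsIsAvailable is an illustrative helper; the real fallback uses the package's own InvokeFunction):

```go
package sketch

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// nnsIsAvailable asks the NNS contract whether a name is still unregistered.
func nnsIsAvailable(inv *invoker.Invoker, nnsHash util.Uint160, name string) (bool, error) {
	b, err := unwrap.Bool(inv.Call(nnsHash, "isAvailable", name))
	if err != nil {
		return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
	}
	return b, nil
}
```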


@@ -13,7 +13,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -22,7 +21,6 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
@@ -30,6 +28,7 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
 	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+	"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
@@ -376,7 +375,9 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen
 		}
 		act, err = actor.New(c.Client, signers)
 	} else {
-		assert.False(withConsensus, "BUG: should never happen")
+		if withConsensus {
+			panic("BUG: should never happen")
+		}
 		act, err = c.CommitteeAct, nil
 	}
 	if err != nil {
@@ -410,9 +411,11 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp
 func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
 	version, err := c.Client.GetVersion()
-	// error appears only if client
-	// has not been initialized
-	assert.NoError(err)
+	if err != nil {
+		// error appears only if client
+		// has not been initialized
+		panic(err)
+	}
 	network := version.Protocol.Network
 
 	// Use parameter context to avoid dealing with signature order.
@@ -444,12 +447,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin
 	for i := range tx.Signers {
 		if tx.Signers[i].Account == h {
-			assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
 			if i < len(tx.Scripts) {
 				tx.Scripts[i] = *w
-			}
-			if i == len(tx.Scripts) {
+			} else if i == len(tx.Scripts) {
 				tx.Scripts = append(tx.Scripts, *w)
+			} else {
+				panic("BUG: invalid signing order")
 			}
 			return nil
 		}
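The signing-order logic in this hunk enforces one invariant: for signer index i, a witness either replaces an already-filled slot or lands in the next free one; anything else is a bug. A standalone illustration of that invariant (putWitness is a hypothetical helper, not code from the repository):

```go
package sketch

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
)

// putWitness stores w as the witness for signer index i, keeping witnesses
// in signer order.
func putWitness(scripts []transaction.Witness, i int, w transaction.Witness) ([]transaction.Witness, error) {
	switch {
	case i < len(scripts):
		scripts[i] = w // re-signing an already witnessed signer
	case i == len(scripts):
		scripts = append(scripts, w) // next signer in order
	default:
		return nil, fmt.Errorf("invalid signing order: index %d with %d witnesses", i, len(scripts))
	}
	return scripts, nil
}
```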
@@ -507,7 +510,9 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
 			int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
 		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
-		assert.NoError(bw.Err)
+		if bw.Err != nil {
+			panic(bw.Err)
+		}
 		return bw.Bytes(), false, nil
 	}
@@ -519,8 +524,12 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
 }
 
 func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
-	avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
-	return !avail, err
+	res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
+	if err != nil {
+		return false, err
+	}
+	return res.State == vmstate.Halt.String(), nil
 }
 
 func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {


@@ -10,7 +10,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"github.com/google/uuid"
 	"github.com/nspcc-dev/neo-go/pkg/config"
 	"github.com/nspcc-dev/neo-go/pkg/core"
@@ -317,7 +316,9 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint
 func (l *LocalClient) putTransactions() error {
 	// 1. Prepare new block.
 	lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
-	assert.NoError(err)
+	if err != nil {
+		panic(err)
+	}
 	defer func() { l.transactions = l.transactions[:0] }()
 	b := &block.Block{
@@ -358,7 +359,9 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s
 	w := io.NewBufBinWriter()
 	emit.Array(w.BinWriter, parameters...)
 	emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
-	assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+	if w.Err != nil {
+		panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+	}
 	return c.InvokeScript(w.Bytes(), signers)
 }
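InvokeFunction above packs the parameters into an array and then calls the method with no further on-stack arguments; only the writer-error handling differs between the two sides. A short sketch of the script-assembly half, assuming the usual neo-go packages (callScript is an illustrative name):

```go
package sketch

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
)

// callScript builds a script that calls method on contract with params.
func callScript(contract util.Uint160, method string, params ...any) ([]byte, error) {
	w := io.NewBufBinWriter()
	emit.Array(w.BinWriter, params...)
	emit.AppCallNoArgs(w.BinWriter, contract, method, callflag.All)
	if w.Err != nil {
		return nil, fmt.Errorf("invalid parameters for '%s': %w", method, w.Err)
	}
	return w.Bytes(), nil
}
```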


@@ -3,7 +3,6 @@ package helper
 import (
 	"errors"
 	"fmt"
-	"slices"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
 		return err
 	}
 	for k, v := range m {
-		if slices.Contains(NetmapConfigKeys, k) {
-			md[k] = v
+		for _, key := range NetmapConfigKeys {
+			if k == key {
+				md[k] = v
+				break
+			}
 		}
 	}
 	return nil
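The two variants above are behaviourally identical; slices.Contains (Go 1.21+) simply replaces the hand-written inner loop. A runnable comparison with made-up keys (netmapConfigKeys here stands in for the package's NetmapConfigKeys):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	netmapConfigKeys := []string{"MaxObjectSize", "EpochDuration", "ContainerFee"}
	m := map[string]any{"MaxObjectSize": 67108864, "UnknownKey": 1}
	md := map[string]any{}
	for k, v := range m {
		if slices.Contains(netmapConfigKeys, k) { // same filtering as the removed inner loop
			md[k] = v
		}
	}
	fmt.Println(md) // map[MaxObjectSize:67108864]
}
```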


@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
@ -23,27 +22,6 @@ import (
"github.com/spf13/viper" "github.com/spf13/viper"
) )
func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
w, err := wallet.NewWalletFromFile(walletPath)
if err != nil {
return nil, err
}
password, err := input.ReadPassword("Enter password for wallet:")
if err != nil {
return nil, fmt.Errorf("can't fetch password: %w", err)
}
for i := range w.Accounts {
if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
err = fmt.Errorf("can't unlock wallet: %w", err)
break
}
}
return w, err
}
func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
wallets, err := openAlphabetWallets(v, walletDir) wallets, err := openAlphabetWallets(v, walletDir)
if err != nil { if err != nil {
@ -73,7 +51,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
err = nil err = nil
} else { } else {
err = fmt.Errorf("can't open alphabet wallet: %w", err) err = fmt.Errorf("can't open wallet: %w", err)
} }
break break
} }


@@ -7,7 +7,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -112,7 +111,9 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b
 	emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
 	emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
-	assert.NoError(w.Err, "can't wrap register script")
+	if w.Err != nil {
+		panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
+	}
 }
 
 func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {


@ -1,18 +1,21 @@
package initialize package initialize
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/util"
@ -27,8 +30,7 @@ const (
) )
func registerCandidateRange(c *helper.InitializeContext, start, end int) error { func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
reader := neo.NewReader(c.ReadOnlyInvoker) regPrice, err := getCandidateRegisterPrice(c)
regPrice, err := reader.GetRegisterPrice()
if err != nil { if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err) return fmt.Errorf("can't fetch registration price: %w", err)
} }
@ -40,7 +42,9 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
emit.Opcodes(w.BinWriter, opcode.ASSERT) emit.Opcodes(w.BinWriter, opcode.ASSERT)
} }
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
assert.NoError(w.Err) if w.Err != nil {
panic(fmt.Sprintf("BUG: %v", w.Err))
}
signers := []actor.SignerAccount{{ signers := []actor.SignerAccount{{
Signer: c.GetSigner(false, c.CommitteeAcc), Signer: c.GetSigner(false, c.CommitteeAcc),
@ -112,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
neoHash := neo.Hash neoHash := neo.Hash
ok, err := transferNEOFinished(c) ok, err := transferNEOFinished(c, neoHash)
if ok || err != nil { if ok || err != nil {
return err return err
} }
@ -135,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
return c.AwaitTx() return c.AwaitTx()
} }
func transferNEOFinished(c *helper.InitializeContext) (bool, error) { func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
r := neo.NewReader(c.ReadOnlyInvoker) r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
} }
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
switch c.Client.(type) {
case *rpcclient.Client:
inv := invoker.New(c.Client, nil)
reader := neo.NewReader(inv)
return reader.GetRegisterPrice()
default:
neoHash := neo.Hash
res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
if err != nil {
return 0, err
}
if len(res.Stack) == 0 {
return 0, errGetPriceInvalid
}
bi, err := res.Stack[0].TryInteger()
if err != nil || !bi.IsInt64() {
return 0, errGetPriceInvalid
}
return bi.Int64(), nil
}
}
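The fallback branch of getCandidateRegisterPrice above parses the price by hand from the invocation result. A compact sketch of that stack-parsing step in isolation (firstStackInt64 is an illustrative helper, not part of the codebase):

```go
package sketch

import (
	"errors"

	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
)

// firstStackInt64 narrows the first stack item of an invocation result to int64.
func firstStackInt64(res *result.Invoke, err error) (int64, error) {
	if err != nil {
		return 0, err
	}
	if len(res.Stack) == 0 {
		return 0, errors.New("invalid response: empty stack")
	}
	bi, err := res.Stack[0].TryInteger()
	if err != nil || !bi.IsInt64() {
		return 0, errors.New("invalid response: not an int64")
	}
	return bi.Int64(), nil
}
```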


@ -22,14 +22,15 @@ import (
) )
const ( const (
gasInitialTotalSupply = 30000000 * native.GASFactor
// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
initialAlphabetGASAmount = 10_000 * native.GASFactor initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract. // initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor initialProxyGASAmount = 50_000 * native.GASFactor
) )
func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
} }
func transferFunds(c *helper.InitializeContext) error { func transferFunds(c *helper.InitializeContext) error {
@ -41,11 +42,6 @@ func transferFunds(c *helper.InitializeContext) error {
return err return err
} }
version, err := c.Client.GetVersion()
if err != nil {
return err
}
var transfers []transferTarget var transfers []transferTarget
for _, acc := range c.Accounts { for _, acc := range c.Accounts {
to := acc.Contract.ScriptHash() to := acc.Contract.ScriptHash()
@ -63,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{ transferTarget{
Token: gas.Hash, Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(), Address: c.CommitteeAcc.Contract.ScriptHash(),
Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), Amount: initialCommitteeGASAmount(c),
}, },
transferTarget{ transferTarget{
Token: neo.Hash, Token: neo.Hash,
@ -87,23 +83,16 @@ func transferFunds(c *helper.InitializeContext) error {
// transferFundsFinished checks balances of accounts we transfer GAS to. // transferFundsFinished checks balances of accounts we transfer GAS to.
// The stage is considered finished if the balance is greater than the half of what we need to transfer. // The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) { func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) acc := c.Accounts[0]
res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
if err != nil {
return false, err
}
version, err := c.Client.GetVersion() r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { res, err := r.BalanceOf(acc.Contract.ScriptHash())
if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
return false, err return false, err
} }
res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
if err != nil { return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
return false, err
}
return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
} }
func transferGASToProxy(c *helper.InitializeContext) error { func transferGASToProxy(c *helper.InitializeContext) error {
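A note on the comparison idiom used by transferFundsFinished above: big.Int.Cmp returns 1 only for a strictly greater balance, so the stage counts as finished once more than half of the target amount has already arrived. In isolation (transferred is a hypothetical helper):

```go
package sketch

import "math/big"

// transferred reports whether balance is strictly above half of target.
func transferred(balance *big.Int, target int64) bool {
	return balance != nil && balance.Cmp(big.NewInt(target/2)) == 1
}
```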


@ -6,9 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper"
) )
func initRegisterCmd() { func initRegisterCmd() {
@ -21,7 +19,6 @@ func initRegisterCmd() {
registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
} }
@ -51,7 +48,6 @@ func initDeleteCmd() {
deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
} }
@ -66,28 +62,3 @@ func deleteDomain(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
cmd.Println("Domain deleted successfully") cmd.Println("Domain deleted successfully")
} }
func initSetAdminCmd() {
Cmd.AddCommand(setAdminCmd)
setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
_ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
_ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
}
func setAdmin(cmd *cobra.Command, _ []string) {
c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
_, err = actor.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
cmd.Println("Set admin successfully")
}


@ -1,11 +1,7 @@
package nns package nns
import ( import (
"errors"
client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@ -20,32 +16,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
c, err := helper.NewRemoteClient(v) c, err := helper.NewRemoteClient(v)
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
var (
alphabet *helper.AlphabetWallets
regularWallets []*helper.RegularWallets
)
if alphabetWalletPath != "" {
alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
}
if walletPath != "" {
regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
}
if adminWalletPath != "" {
regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
}
if alphabet == nil && regularWallets == nil {
commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
}
ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
r := management.NewReader(ac.Invoker) r := management.NewReader(ac.Invoker)


@@ -19,7 +19,6 @@ func initAddRecordCmd() {
 	addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
 	addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
 	addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
-	addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 	_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
 	_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
@@ -41,7 +40,6 @@ func initDelRecordsCmd() {
 	delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 	delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
 	delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
-	delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 	_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
 	_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
@@ -54,7 +52,6 @@ func initDelRecordCmd() {
 	delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
 	delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
 	delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
-	delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 	_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
 	_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)


@ -39,7 +39,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: registerDomain, Run: registerDomain,
} }
@ -49,7 +48,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: deleteDomain, Run: deleteDomain,
} }
@ -77,7 +75,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: addRecord, Run: addRecord,
} }
@ -95,7 +92,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: delRecords, Run: delRecords,
} }
@ -105,21 +101,9 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: delRecord, Run: delRecord,
} }
setAdminCmd = &cobra.Command{
Use: "set-admin",
Short: "Sets admin for domain",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
},
Run: setAdmin,
}
) )
func init() { func init() {
@ -132,5 +116,4 @@ func init() {
initGetRecordsCmd() initGetRecordsCmd()
initDelRecordsCmd() initDelRecordsCmd()
initDelRecordCmd() initDelRecordCmd()
initSetAdminCmd()
} }


@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
 	buf := bytes.NewBuffer(nil)
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
-	_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
-	_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
-	_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
+	_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
 	_ = tw.Flush()
 	cmd.Print(buf.String())


@ -5,9 +5,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@ -41,8 +41,8 @@ func init() {
rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd) rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd) rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(maintenance.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))


@ -0,0 +1,137 @@
package storagecfg
const configTemplate = `logger:
level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
node:
wallet:
path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
addresses: # list of addresses announced by Storage node in the Network map
- {{ .AnnouncedAddress }}
attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
grpc:
num: 1 # total number of listener endpoints
0:
endpoint: {{ .Endpoint }} # endpoint for gRPC server
tls:{{if .TLSCert}}
enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
certificate: {{ .TLSCert }} # path to TLS certificate
key: {{ .TLSKey }} # path to TLS key
{{- else }}
enabled: false # disable TLS for a gRPC connection
{{- end}}
control:
authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
{{- range .AuthorizedKeys }}
- {{.}}{{end}}
grpc:
endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
morph:
dial_timeout: 20s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # use TTL cache for side chain GET operations
rpc_endpoint: # side chain N3 RPC endpoints
{{- range .MorphRPC }}
- address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard:
default: # section with the default shard parameters
metabase:
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
blobstor:
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 2 # max depth of object tree storage in FS
small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
compress: true # turn on/off Zstandard compression (level 3) of stored objects
compression_exclude_content_types:
- audio/*
- video/*
blobovnicza:
size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
gc:
remover_batch_size: 200 # number of objects to be removed by the garbage collector
remover_sleep_interval: 5m # frequency of the garbage collector invocation
0:
mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
metabase:
path: {{ .MetabasePath }} # path to the metabase
blobstor:
path: {{ .BlobstorPath }} # path to the blobstor
{{end}}`
const (
neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
)
var n3config = map[string]struct {
MorphRPC []string
RPC []string
NeoFSContract string
BalanceContract string
}{
"testnet": {
MorphRPC: []string{
"rpc01.morph.testnet.fs.neo.org:51331",
"rpc02.morph.testnet.fs.neo.org:51331",
"rpc03.morph.testnet.fs.neo.org:51331",
"rpc04.morph.testnet.fs.neo.org:51331",
"rpc05.morph.testnet.fs.neo.org:51331",
"rpc06.morph.testnet.fs.neo.org:51331",
"rpc07.morph.testnet.fs.neo.org:51331",
},
RPC: []string{
"rpc01.testnet.n3.nspcc.ru:21331",
"rpc02.testnet.n3.nspcc.ru:21331",
"rpc03.testnet.n3.nspcc.ru:21331",
"rpc04.testnet.n3.nspcc.ru:21331",
"rpc05.testnet.n3.nspcc.ru:21331",
"rpc06.testnet.n3.nspcc.ru:21331",
"rpc07.testnet.n3.nspcc.ru:21331",
},
NeoFSContract: neofsTestnetAddress,
BalanceContract: balanceTestnetAddress,
},
"mainnet": {
MorphRPC: []string{
"rpc1.morph.fs.neo.org:40341",
"rpc2.morph.fs.neo.org:40341",
"rpc3.morph.fs.neo.org:40341",
"rpc4.morph.fs.neo.org:40341",
"rpc5.morph.fs.neo.org:40341",
"rpc6.morph.fs.neo.org:40341",
"rpc7.morph.fs.neo.org:40341",
},
RPC: []string{
"rpc1.n3.nspcc.ru:10331",
"rpc2.n3.nspcc.ru:10331",
"rpc3.n3.nspcc.ru:10331",
"rpc4.n3.nspcc.ru:10331",
"rpc5.n3.nspcc.ru:10331",
"rpc6.n3.nspcc.ru:10331",
"rpc7.n3.nspcc.ru:10331",
},
NeoFSContract: neofsMainnetAddress,
BalanceContract: balanceMainnetAddress,
},
}


@ -0,0 +1,433 @@
package storagecfg
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"text/template"
"time"
netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"github.com/chzyer/readline"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
)
const (
walletFlag = "wallet"
accountFlag = "account"
)
const (
defaultControlEndpoint = "localhost:8090"
defaultDataEndpoint = "localhost"
)
// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
Use: "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
Short: "Section for storage node configuration commands",
Run: storageConfig,
}
func init() {
fs := RootCmd.Flags()
fs.StringP(walletFlag, "w", "", "Path to wallet")
fs.StringP(accountFlag, "a", "", "Wallet account")
}
type config struct {
AnnouncedAddress string
AuthorizedKeys []string
ControlEndpoint string
Endpoint string
TLSCert string
TLSKey string
MorphRPC []string
Attribute struct {
Locode string
}
Wallet struct {
Path string
Account string
Password string
}
Relay bool
BlobstorPath string
MetabasePath string
}
func storageConfig(cmd *cobra.Command, args []string) {
outPath := getOutputPath(args)
historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
readline.SetHistoryPath(historyPath)
var c config
c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
if c.Wallet.Path == "" {
c.Wallet.Path = getPath("Path to the storage node wallet: ")
}
w, err := wallet.NewWalletFromFile(c.Wallet.Path)
fatalOnErr(err)
fillWalletAccount(cmd, &c, w)
accH, err := flags.ParseAddress(c.Wallet.Account)
fatalOnErr(err)
acc := w.GetAccount(accH)
if acc == nil {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
fatalOnErr(err)
c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
network := readNetwork(cmd)
c.MorphRPC = n3config[network].MorphRPC
depositGas(cmd, acc, network)
c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
endpoint := getDefaultEndpoint(cmd, &c)
c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
if c.Endpoint == "" {
c.Endpoint = endpoint
}
c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
if c.ControlEndpoint == "" {
c.ControlEndpoint = defaultControlEndpoint
}
c.TLSCert = getPath("TLS Certificate (optional): ")
if c.TLSCert != "" {
c.TLSKey = getPath("TLS Key: ")
}
c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
if !c.Relay {
p := getPath("Path to the storage directory (all available storage will be used): ")
c.BlobstorPath = filepath.Join(p, "blob")
c.MetabasePath = filepath.Join(p, "meta")
}
out := applyTemplate(c)
fatalOnErr(os.WriteFile(outPath, out, 0o644))
cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}
func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
var addr, port string
for {
c.AnnouncedAddress = getString("Publicly announced address: ")
validator := netutil.Address{}
err := validator.FromString(c.AnnouncedAddress)
if err != nil {
cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
continue
}
uriAddr, err := url.Parse(validator.URIAddr())
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
addr = uriAddr.Hostname()
port = uriAddr.Port()
ip, err := net.ResolveIPAddr("ip", addr)
if err != nil {
cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
continue
}
if !ip.IP.IsGlobalUnicast() {
cmd.Println("IP must be global unicast.")
continue
}
cmd.Printf("Resolved IP address: %s\n", ip.String())
_, err = strconv.ParseUint(port, 10, 16)
if err != nil {
cmd.Println("Port must be an integer.")
continue
}
break
}
return net.JoinHostPort(defaultDataEndpoint, port)
}
func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
if c.Wallet.Account == "" {
addr := address.Uint160ToString(w.GetChangeAddress())
c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
if c.Wallet.Account == "" {
c.Wallet.Account = addr
}
}
}
func readNetwork(cmd *cobra.Command) string {
var network string
for {
network = getString("Choose network [mainnet]/testnet: ")
switch network {
case "":
network = "mainnet"
case "testnet", "mainnet":
default:
cmd.Println(`Network must be either "mainnet" or "testnet"`)
continue
}
break
}
return network
}
func getOutputPath(args []string) string {
if len(args) != 0 {
return args[0]
}
outPath := getPath("File to write config at [./config.yml]: ")
if outPath == "" {
outPath = "./config.yml"
}
return outPath
}
func getWalletAccount(w *wallet.Wallet, prompt string) string {
addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
for i := range w.Accounts {
addrs[i] = readline.PcItem(w.Accounts[i].Address)
}
readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
defer readline.SetAutoComplete(nil)
s, err := readline.Line(prompt)
fatalOnErr(err)
return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
}
func getString(prompt string) string {
s, err := readline.Line(prompt)
fatalOnErr(err)
if s != "" {
_ = readline.AddHistory(s)
}
return s
}
type filenameCompleter struct{}
func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
prefix := string(line[:pos])
dir := filepath.Dir(prefix)
de, err := os.ReadDir(dir)
if err != nil {
return nil, 0
}
for i := range de {
name := filepath.Join(dir, de[i].Name())
if strings.HasPrefix(name, prefix) {
tail := []rune(strings.TrimPrefix(name, prefix))
if de[i].IsDir() {
tail = append(tail, filepath.Separator)
}
newLine = append(newLine, tail)
}
}
if pos != 0 {
return newLine, pos - len([]rune(dir))
}
return newLine, 0
}
func getPath(prompt string) string {
readline.SetAutoComplete(filenameCompleter{})
defer readline.SetAutoComplete(nil)
p, err := readline.Line(prompt)
fatalOnErr(err)
if p == "" {
return p
}
_ = readline.AddHistory(p)
abs, err := filepath.Abs(p)
if err != nil {
fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
}
return abs
}
func getConfirmation(def bool, prompt string) bool {
for {
s, err := readline.Line(prompt)
fatalOnErr(err)
switch strings.ToLower(s) {
case "y", "yes":
return true
case "n", "no":
return false
default:
if len(s) == 0 {
return def
}
}
}
}
func applyTemplate(c config) []byte {
tmpl, err := template.New("config").Parse(configTemplate)
fatalOnErr(err)
b := bytes.NewBuffer(nil)
fatalOnErr(tmpl.Execute(b, c))
return b.Bytes()
}
func fatalOnErr(err error) {
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}
func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
sideClient := initClient(n3config[network].MorphRPC)
balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
sideActor, err := actor.NewSimple(sideClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
}
sideGas := nep17.NewReader(sideActor, balanceHash)
accSH := acc.Contract.ScriptHash()
balance, err := sideGas.BalanceOf(accSH)
if err != nil {
fatalOnErr(fmt.Errorf("side chain balance: %w", err))
}
ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
fixedn.ToString(balance, 12)))
if !ok {
return
}
amountStr := getString("Enter amount in GAS: ")
amount, err := fixedn.FromString(amountStr, 8)
if err != nil {
fatalOnErr(fmt.Errorf("invalid amount: %w", err))
}
mainClient := initClient(n3config[network].RPC)
neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
mainActor, err := actor.NewSimple(mainClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
}
mainGas := nep17.New(mainActor, gas.Hash)
txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
if err != nil {
fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
}
cmd.Print("Waiting for transactions to persist.")
tick := time.NewTicker(time.Second / 2)
defer tick.Stop()
timer := time.NewTimer(time.Second * 20)
defer timer.Stop()
at := trigger.Application
loop:
for {
select {
case <-tick.C:
_, err := mainClient.GetApplicationLog(txHash, &at)
if err == nil {
cmd.Print("\n")
break loop
}
cmd.Print(".")
case <-timer.C:
cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
if getConfirmation(false, "Continue configuration? yes/[no]: ") {
return
}
os.Exit(1)
}
}
}
func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := make([]string, len(rpc))
copy(shuffled, rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {
c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
DialTimeout: time.Second * 2,
RequestTimeout: time.Second * 5,
})
if err != nil {
continue
}
if err = c.Init(); err != nil {
continue
}
return c
}
fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
panic("unreachable")
}


@ -9,6 +9,7 @@ import (
"io" "io"
"os" "os"
"slices" "slices"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@ -76,7 +77,9 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers. // SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID { func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers() list := x.cliRes.Containers()
slices.SortFunc(list, cid.ID.Cmp) slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
})
return list return list
} }
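
The sorting change above passes the Cmp method expression directly to slices.SortFunc instead of an inline closure over strings.Compare. A sketch of the same trick with a toy id type; the SDK's cid.ID is assumed, per the new code, to expose a compatible Cmp method:

package main

import (
	"fmt"
	"slices"
	"strings"
)

type id string

// Cmp orders ids lexicographically, mirroring strings.Compare on their text form.
func (a id) Cmp(b id) int { return strings.Compare(string(a), string(b)) }

func main() {
	list := []id{"c", "a", "b"}
	// The method expression id.Cmp has type func(id, id) int, so it can be
	// handed to slices.SortFunc directly instead of an anonymous comparator.
	slices.SortFunc(list, id.Cmp)
	fmt.Println(list) // [a b c]
}
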
@ -684,7 +687,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err) return nil, fmt.Errorf("read object list: %w", err)
} }
slices.SortFunc(list, oid.ID.Cmp) slices.SortFunc(list, func(a, b oid.ID) int {
return strings.Compare(a.EncodeToString(), b.EncodeToString())
})
return &SearchObjectsRes{ return &SearchObjectsRes{
ids: list, ids: list,
@ -858,8 +863,6 @@ type PatchObjectPrm struct {
ReplaceAttribute bool ReplaceAttribute bool
NewSplitHeader *objectSDK.SplitHeader
PayloadPatches []PayloadPatch PayloadPatches []PayloadPatch
} }
@ -890,11 +893,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err) return nil, fmt.Errorf("init payload reading: %w", err)
} }
if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
NewSplitHeader: prm.NewSplitHeader,
NewAttributes: prm.NewAttributes,
ReplaceAttributes: prm.ReplaceAttribute,
}) {
for _, pp := range prm.PayloadPatches { for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil { if err != nil {


@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
prmDial := client.PrmDial{ prmDial := client.PrmDial{
Endpoint: addr.URIAddr(), Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{ GRPCDialOptions: []grpc.DialOption{
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
}, },


@ -28,7 +28,7 @@ const (
RPC = "rpc-endpoint" RPC = "rpc-endpoint"
RPCShorthand = "r" RPCShorthand = "r"
RPCDefault = "" RPCDefault = ""
RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')" RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')"
Timeout = "timeout" Timeout = "timeout"
TimeoutShorthand = "t" TimeoutShorthand = "t"


@ -44,7 +44,6 @@ is set to current epoch + n.
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
_ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC))
}, },
} }
@ -82,7 +81,7 @@ func createToken(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
if iatRelative || expRelative || nvbRelative { if iatRelative || expRelative || nvbRelative {
endpoint := viper.GetString(commonflags.RPC) endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
if len(endpoint) == 0 { if len(endpoint) == 0 {
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
} }


@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag) outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" { if outputPath != "" {
err := os.WriteFile(outputPath, overrideMarshalled, 0o644) err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err) commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else { } else {
fmt.Print("\n") fmt.Print("\n")


@ -5,9 +5,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"maps"
"os" "os"
"slices"
"strings" "strings"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@ -23,14 +21,13 @@ import (
type policyPlaygroundREPL struct { type policyPlaygroundREPL struct {
cmd *cobra.Command cmd *cobra.Command
nodes map[string]netmap.NodeInfo nodes map[string]netmap.NodeInfo
console *readline.Instance
} }
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{ return &policyPlaygroundREPL{
cmd: cmd, cmd: cmd,
nodes: map[string]netmap.NodeInfo{}, nodes: map[string]netmap.NodeInfo{},
} }, nil
} }
func (repl *policyPlaygroundREPL) handleLs(args []string) error { func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@ -40,10 +37,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
i := 1 i := 1
for id, node := range repl.nodes { for id, node := range repl.nodes {
var attrs []string var attrs []string
for k, v := range node.Attributes() { node.IterateAttributes(func(k, v string) {
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
} })
fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
i++ i++
} }
return nil return nil
@ -150,29 +147,12 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error {
for _, node := range ns { for _, node := range ns {
ids = append(ids, hex.EncodeToString(node.PublicKey())) ids = append(ids, hex.EncodeToString(node.PublicKey()))
} }
fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) fmt.Printf("\t%2d: %v\n", i+1, ids)
} }
return nil return nil
} }
func (repl *policyPlaygroundREPL) handleHelp(args []string) error {
if len(args) != 0 {
if _, ok := commands[args[0]]; !ok {
return fmt.Errorf("unknown command: %q", args[0])
}
fmt.Fprintln(repl.console, commands[args[0]].usage)
return nil
}
commandList := slices.Collect(maps.Keys(commands))
slices.Sort(commandList)
for _, command := range commandList {
fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion)
}
return nil
}
func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
var nm netmap.NetMap var nm netmap.NetMap
var nodes []netmap.NodeInfo var nodes []netmap.NodeInfo
@ -183,104 +163,15 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
return nm return nm
} }
type commandDescription struct { var policyPlaygroundCompleter = readline.NewPrefixCompleter(
descriprion string readline.PcItem("list"),
usage string readline.PcItem("ls"),
} readline.PcItem("add"),
readline.PcItem("load"),
var commands = map[string]commandDescription{ readline.PcItem("remove"),
"list": { readline.PcItem("rm"),
descriprion: "Display all nodes in the netmap", readline.PcItem("eval"),
usage: `Display all nodes in the netmap )
Example of usage:
list
1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
`,
},
"ls": {
descriprion: "Display all nodes in the netmap",
usage: `Display all nodes in the netmap
Example of usage:
ls
1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
`,
},
"add": {
descriprion: "Add a new node: add <node-hash> attr=value",
usage: `Add a new node
Example of usage:
add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`,
},
"load": {
descriprion: "Load netmap from file: load <path>",
usage: `Load netmap from file
Example of usage:
load "netmap.json"
File format (netmap.json):
{
"03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
"continent": "Europe",
"country": "Poland"
},
"02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
"continent": "Antarctica",
"country": "Heard Island"
}
}`,
},
"remove": {
descriprion: "Remove a node: remove <node-hash>",
usage: `Remove a node
Example of usage:
remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
},
"rm": {
descriprion: "Remove a node: rm <node-hash>",
usage: `Remove a node
Example of usage:
rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
},
"eval": {
descriprion: "Evaluate a policy: eval <policy>",
usage: `Evaluate a policy
Example of usage:
eval REP 2`,
},
"help": {
descriprion: "Show available commands",
},
}
func (repl *policyPlaygroundREPL) handleCommand(args []string) error {
if len(args) == 0 {
return nil
}
switch args[0] {
case "list", "ls":
return repl.handleLs(args[1:])
case "add":
return repl.handleAdd(args[1:])
case "load":
return repl.handleLoad(args[1:])
case "remove", "rm":
return repl.handleRemove(args[1:])
case "eval":
return repl.handleEval(args[1:])
case "help":
return repl.handleHelp(args[1:])
}
return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0])
}
func (repl *policyPlaygroundREPL) run() error { func (repl *policyPlaygroundREPL) run() error {
if len(viper.GetString(commonflags.RPC)) > 0 { if len(viper.GetString(commonflags.RPC)) > 0 {
@ -299,32 +190,24 @@ func (repl *policyPlaygroundREPL) run() error {
} }
} }
if len(viper.GetString(netmapConfigPath)) > 0 { cmdHandlers := map[string]func([]string) error{
err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) "list": repl.handleLs,
commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) "ls": repl.handleLs,
"add": repl.handleAdd,
"load": repl.handleLoad,
"remove": repl.handleRemove,
"rm": repl.handleRemove,
"eval": repl.handleEval,
} }
var cfgCompleter []readline.PrefixCompleterInterface
var helpSubItems []readline.PrefixCompleterInterface
for name := range commands {
if name != "help" {
cfgCompleter = append(cfgCompleter, readline.PcItem(name))
helpSubItems = append(helpSubItems, readline.PcItem(name))
}
}
cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...))
completer := readline.NewPrefixCompleter(cfgCompleter...)
rl, err := readline.NewEx(&readline.Config{ rl, err := readline.NewEx(&readline.Config{
Prompt: "> ", Prompt: "> ",
InterruptPrompt: "^C", InterruptPrompt: "^C",
AutoComplete: completer, AutoComplete: policyPlaygroundCompleter,
}) })
if err != nil { if err != nil {
return fmt.Errorf("error initializing readline: %w", err) return fmt.Errorf("error initializing readline: %w", err)
} }
repl.console = rl
defer rl.Close() defer rl.Close()
var exit bool var exit bool
@ -342,8 +225,17 @@ func (repl *policyPlaygroundREPL) run() error {
} }
exit = false exit = false
if err := repl.handleCommand(strings.Fields(line)); err != nil { parts := strings.Fields(line)
fmt.Fprintf(repl.console, "error: %v\n", err) if len(parts) == 0 {
continue
}
cmd := parts[0]
if handler, exists := cmdHandlers[cmd]; exists {
if err := handler(parts[1:]); err != nil {
fmt.Printf("error: %v\n", err)
}
} else {
fmt.Printf("error: unknown command %q\n", cmd)
} }
} }
} }
@ -354,19 +246,12 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies. Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`, If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) { Run: func(cmd *cobra.Command, _ []string) {
repl := newPolicyPlaygroundREPL(cmd) repl, err := newPolicyPlaygroundREPL(cmd)
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run()) commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
}, },
} }
const (
netmapConfigPath = "netmap-config"
netmapConfigUsage = "Path to the netmap configuration file"
)
func initContainerPolicyPlaygroundCmd() { func initContainerPolicyPlaygroundCmd() {
commonflags.Init(policyPlaygroundCmd) commonflags.Init(policyPlaygroundCmd)
policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage)
_ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath))
} }


@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
leftMinutes := int(leftSeconds / 60) leftMinutes := int(leftSeconds / 60)
fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
} }
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
hour := int(duration.Seconds() / 3600) hour := int(duration.Seconds() / 3600)
minute := int(duration.Seconds()/60) % 60 minute := int(duration.Seconds()/60) % 60
second := int(duration.Seconds()) % 60 second := int(duration.Seconds()) % 60
fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
} }
} }
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStartedAt() != nil { if resp.GetBody().GetStartedAt() != nil {
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
} }
} }
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if len(resp.GetBody().GetErrorMessage()) > 0 { if len(resp.GetBody().GetErrorMessage()) > 0 {
fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
} }
} }
@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
default: default:
status = "undefined" status = "undefined"
} }
fmt.Fprintf(sb, " Status: %s.", status) sb.WriteString(fmt.Sprintf(" Status: %s.", status))
} }
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
} }
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetEvacuatedObjects(),
resp.GetBody().GetTotalObjects(), resp.GetBody().GetTotalObjects(),
resp.GetBody().GetFailedObjects(), resp.GetBody().GetFailedObjects(),
resp.GetBody().GetSkippedObjects(), resp.GetBody().GetSkippedObjects(),
resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetEvacuatedTrees(),
resp.GetBody().GetTotalTrees(), resp.GetBody().GetTotalTrees(),
resp.GetBody().GetFailedTrees()) resp.GetBody().GetFailedTrees()))
} }
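
Both sides of the hunk above produce identical output; the new form uses fmt.Fprintf against the strings.Builder (which implements io.Writer) and avoids formatting into an intermediate string first. A small standalone sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	var sb strings.Builder
	// Old style: format to a string, then copy it into the builder.
	sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", 1, 2, 3))
	// New style: *strings.Builder is an io.Writer, so Fprintf can target it directly.
	fmt.Fprintf(&sb, " Status: %s.", "completed")
	fmt.Println(sb.String())
}
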
func initControlEvacuationShardCmd() { func initControlEvacuationShardCmd() {


@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write([]byte("#\tName\tType\n")) _, _ = tw.Write([]byte("#\tName\tType\n"))
for i, t := range targets { for i, t := range targets {
_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
} }
_ = tw.Flush() _ = tw.Flush()
cmd.Print(buf.String()) cmd.Print(buf.String())


@ -1,117 +0,0 @@
package control
import (
"bytes"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
const (
FullInfoFlag = "full"
FullInfoFlagUsage = "Print full ShardInfo."
)
var locateObjectCmd = &cobra.Command{
Use: "locate-object",
Short: "List shards storing the object",
Long: "List shards storing the object",
Run: locateObject,
}
func initControlLocateObjectCmd() {
initControlFlags(locateObjectCmd)
flags := locateObjectCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
}
func locateObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
_ = object.ReadObjectAddress(cmd, &cnr, &obj)
pk := key.Get(cmd)
body := new(control.ListShardsForObjectRequest_Body)
body.SetContainerId(cnr.EncodeToString())
body.SetObjectId(obj.EncodeToString())
req := new(control.ListShardsForObjectRequest)
req.SetBody(body)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var err error
var resp *control.ListShardsForObjectResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.ListShardsForObject(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
shardIDs := resp.GetBody().GetShard_ID()
isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
if !isFull {
for _, id := range shardIDs {
cmd.Println(base58.Encode(id))
}
return
}
// get full shard info
listShardsReq := new(control.ListShardsRequest)
listShardsReq.SetBody(new(control.ListShardsRequest_Body))
signRequest(cmd, pk, listShardsReq)
var listShardsResp *control.ListShardsResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
listShardsResp, err = control.ListShards(client, listShardsReq)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
shards := listShardsResp.GetBody().GetShards()
sortShardsByID(shards)
shards = filterShards(shards, shardIDs)
isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
if isJSON {
prettyPrintShardsJSON(cmd, shards)
} else {
prettyPrintShards(cmd, shards)
}
}
func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
var res []control.ShardInfo
for _, id := range ids {
for _, inf := range info {
if bytes.Equal(inf.Shard_ID, id) {
res = append(res, inf)
}
}
}
return res
}


@ -39,7 +39,6 @@ func init() {
listRulesCmd, listRulesCmd,
getRuleCmd, getRuleCmd,
listTargetsCmd, listTargetsCmd,
locateObjectCmd,
) )
initControlHealthCheckCmd() initControlHealthCheckCmd()
@ -53,5 +52,4 @@ func init() {
initControlListRulesCmd() initControlListRulesCmd()
initControGetRuleCmd() initControGetRuleCmd()
initControlListTargetsCmd() initControlListTargetsCmd()
initControlLocateObjectCmd()
} }


@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
var resp *control.GetNetmapStatusResponse var resp *control.GetNetmapStatusResponse
var err error var err error
err = cli.ExecRaw(func(client *rawclient.Client) error { err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.GetNetmapStatus(cmd.Context(), client, req) resp, err = control.GetNetmapStatus(client, req)
return err return err
}) })
commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err) commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)


@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
var sealWritecacheShardCmd = &cobra.Command{ var sealWritecacheShardCmd = &cobra.Command{
Use: "seal", Use: "seal",
Short: "Flush objects from write-cache and move write-cache to degraded read only mode.", Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
Run: sealWritecache, Run: sealWritecache,
} }


@ -62,11 +62,11 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
cmd.Println("state:", stateWord) cmd.Println("state:", stateWord)
for s := range i.NetworkEndpoints() { netmap.IterateNetworkEndpoints(i, func(s string) {
cmd.Println("address:", s) cmd.Println("address:", s)
} })
for key, value := range i.Attributes() { i.IterateAttributes(func(key, value string) {
cmd.Printf("attribute: %s=%s\n", key, value) cmd.Printf("attribute: %s=%s\n", key, value)
} })
} }
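
The hunk above drops the callback-style IterateNetworkEndpoints / IterateAttributes helpers in favour of ranging over iterator-returning methods, which relies on Go 1.23 range-over-function iterators. A minimal sketch with a hypothetical node type, not the SDK's netmap.NodeInfo:

package main

import (
	"fmt"
	"iter"
)

type node struct{ endpoints []string }

// Endpoints returns a single-value iterator, the shape `for s := range n.Endpoints()` expects.
func (n node) Endpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, e := range n.endpoints {
			if !yield(e) {
				return
			}
		}
	}
}

func main() {
	n := node{endpoints: []string{"addr1:8080", "addr2:8080"}}
	for s := range n.Endpoints() {
		fmt.Println("address:", s)
	}
}
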


@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
} }
objAddr = ReadObjectAddress(cmd, &cnr, &obj) objAddr = readObjectAddress(cmd, &cnr, &obj)
} }
pk := key.GetOrGenerate(cmd) pk := key.GetOrGenerate(cmd)


@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID var cnr cid.ID
var obj oid.ID var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj) objAddr := readObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String() filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename) out, closer := createOutWriter(cmd, filename)


@ -41,7 +41,7 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...") flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range") _ = objectHashCmd.MarkFlagRequired("range")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'") flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID var cnr cid.ID
var obj oid.ID var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj) objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd) ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err) commonCmd.ExitOnErr(cmd, "", err)


@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID var cnr cid.ID
var obj oid.ID var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj) objAddr := readObjectAddress(cmd, &cnr, &obj)
pk := key.GetOrGenerate(cmd) pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)


@ -18,7 +18,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper"
) )
// object lock command. // object lock command.
@ -79,7 +78,7 @@ var objectLockCmd = &cobra.Command{
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() defer cancel()
endpoint := viper.GetString(commonflags.RPC) endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)


@ -7,7 +7,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"slices"
"sync" "sync"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@ -49,12 +48,6 @@ type ecHeader struct {
parent oid.ID parent oid.ID
} }
type objectCounter struct {
sync.Mutex
total uint32
isECcounted bool
}
type objectPlacement struct { type objectPlacement struct {
requiredNodes []netmapSDK.NodeInfo requiredNodes []netmapSDK.NodeInfo
confirmedNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo
@ -63,7 +56,6 @@ type objectPlacement struct {
type objectNodesResult struct { type objectNodesResult struct {
errors []error errors []error
placements map[oid.ID]objectPlacement placements map[oid.ID]objectPlacement
total uint32
} }
type ObjNodesDataObject struct { type ObjNodesDataObject struct {
@ -109,23 +101,23 @@ func initObjectNodesCmd() {
func objectNodes(cmd *cobra.Command, _ []string) { func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID var cnrID cid.ID
var objID oid.ID var objID oid.ID
ReadObjectAddress(cmd, &cnrID, &objID) readObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd) pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
getActualPlacement(cmd, netmap, pk, objects, count, result) getActualPlacement(cmd, netmap, pk, objects, result)
printPlacement(cmd, objID, objects, result) printPlacement(cmd, objID, objects, result)
} }
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
var addrObj oid.Address var addrObj oid.Address
addrObj.SetContainer(cnrID) addrObj.SetContainer(cnrID)
addrObj.SetObject(objID) addrObj.SetObject(objID)
@ -153,7 +145,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
parent: res.Header().ECHeader().Parent(), parent: res.Header().ECHeader().Parent(),
} }
} }
return []phyObject{obj}, 1 return []phyObject{obj}
} }
var errSplitInfo *objectSDK.SplitInfoError var errSplitInfo *objectSDK.SplitInfoError
@ -163,34 +155,29 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
var ecInfoError *objectSDK.ECInfoError var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) { if errors.As(err, &ecInfoError) {
return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
} }
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
return nil, 0 return nil
} }
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
} }
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
var total int
splitInfo := errSplitInfo.SplitInfo() splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
if total = len(members); total > 0 { return members
total-- // linking object is not data object
}
return members, total
} }
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
return members, len(members) return members
} }
members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
return members, len(members)
} }
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
@ -333,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
} }
placementBuilder := placement.NewNetworkMapBuilder(netmap) placementBuilder := placement.NewNetworkMapBuilder(netmap)
for _, object := range objects { for _, object := range objects {
placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy) placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err) commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
for repIdx, rep := range placement { for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@ -371,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
placementObjectID = object.ecHeader.parent placementObjectID = object.ecHeader.parent
} }
placementBuilder := placement.NewNetworkMapBuilder(netmap) placementBuilder := placement.NewNetworkMapBuilder(netmap)
placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy) placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err) commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
for _, vector := range placement { for _, vector := range placement {
@ -396,11 +383,8 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
} }
} }
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
resultMtx := &sync.Mutex{} resultMtx := &sync.Mutex{}
counter := &objectCounter{
total: uint32(count),
}
candidates := getNodesToCheckObjectExistance(cmd, netmap, result) candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
@ -417,7 +401,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
for _, object := range objects { for _, object := range objects {
eg.Go(func() error { eg.Go(func() error {
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
resultMtx.Lock() resultMtx.Lock()
defer resultMtx.Unlock() defer resultMtx.Unlock()
if err == nil && stored { if err == nil && stored {
@ -436,7 +420,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
} }
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
result.total = counter.total
} }
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
@ -461,11 +444,17 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
var cli *client.Client var cli *client.Client
var addresses []string var addresses []string
if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) candidate.IterateNetworkEndpoints(func(s string) bool {
addresses = append(addresses, s)
return false
})
addresses = append(addresses, candidate.ExternalAddresses()...) addresses = append(addresses, candidate.ExternalAddresses()...)
} else { } else {
addresses = append(addresses, candidate.ExternalAddresses()...) addresses = append(addresses, candidate.ExternalAddresses()...)
addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) candidate.IterateNetworkEndpoints(func(s string) bool {
addresses = append(addresses, s)
return false
})
} }
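
slices.AppendSeq (Go 1.23) drains an iterator into a slice, which is why the manual callback-append loops on the old side collapse to single calls on the new side. A small sketch, using slices.Values to stand in for an iter.Seq[string] such as the one NetworkEndpoints() is assumed to return:

package main

import (
	"fmt"
	"slices"
)

func main() {
	external := []string{"ext1:8080"}
	internal := []string{"int1:8080", "int2:8080"}

	// slices.Values turns a slice into an iter.Seq[string]; AppendSeq drains
	// any such iterator into the destination slice, replacing the manual
	// callback-and-append loop shown on the old side of the diff.
	addresses := append([]string(nil), external...)
	addresses = slices.AppendSeq(addresses, slices.Values(internal))
	fmt.Println(addresses) // [ext1:8080 int1:8080 int2:8080]
}
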
var lastErr error var lastErr error
@ -489,7 +478,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
return cli, nil return cli, nil
} }
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
var addrObj oid.Address var addrObj oid.Address
addrObj.SetContainer(cnrID) addrObj.SetContainer(cnrID)
addrObj.SetObject(objID) addrObj.SetObject(objID)
@ -504,14 +493,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
res, err := internalclient.HeadObject(ctx, prmHead) res, err := internalclient.HeadObject(ctx, prmHead)
if err == nil && res != nil { if err == nil && res != nil {
if res.Header().ECHeader() != nil {
counter.Lock()
defer counter.Unlock()
if !counter.isECcounted {
counter.total *= res.Header().ECHeader().Total()
}
counter.isECcounted = true
}
return true, nil return true, nil
} }
var notFound *apistatus.ObjectNotFound var notFound *apistatus.ObjectNotFound
@ -531,8 +512,7 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
} }
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
for _, object := range objects { for _, object := range objects {
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)


@ -2,7 +2,6 @@ package object
import ( import (
"fmt" "fmt"
"os"
"strconv" "strconv"
"strings" "strings"
@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -22,7 +20,6 @@ const (
replaceAttrsFlagName = "replace-attrs" replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range" rangeFlagName = "range"
payloadFlagName = "payload" payloadFlagName = "payload"
splitHeaderFlagName = "split-header"
) )
var objectPatchCmd = &cobra.Command{ var objectPatchCmd = &cobra.Command{
@ -49,18 +46,17 @@ func initObjectPatchCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2") flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length") flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
} }
func patch(cmd *cobra.Command, _ []string) { func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID var cnr cid.ID
var obj oid.ID var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj) objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd) ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err) commonCmd.ExitOnErr(cmd, "", err)
@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs prm.ReplaceAttribute = replaceAttrs
prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
for i := range ranges { for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i], Range: ranges[i],
@ -105,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
} }
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName) var rawAttrs []string
if err != nil {
return nil, err raw := cmd.Flag(newAttrsFlagName).Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
} }
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
@ -153,22 +149,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName) v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v return v
} }
func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
path, _ := cmd.Flags().GetString(splitHeaderFlagName)
if path == "" {
return nil
}
data, err := os.ReadFile(path)
commonCmd.ExitOnErr(cmd, "read file error: %w", err)
splitHdrV2 := new(objectV2.SplitHeader)
err = splitHdrV2.Unmarshal(data)
if err != nil {
err = splitHdrV2.UnmarshalJSON(data)
commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
}
return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
}

View file

@ -50,7 +50,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2") flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute") flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute") flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object") flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
} }
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice("attributes") var rawAttrs []string
if err != nil {
return nil, err raw := cmd.Flag("attributes").Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
} }
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
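A note on the attribute flags changed here and in the patch command above: one variant relies on pflag's StringSlice, the other reads a plain String flag and splits it on commas itself. A minimal, self-contained sketch of that manual style (parseAttrs is a hypothetical helper returning a map for brevity; the real commands build SDK attribute objects and keep their order):

package main

import (
    "fmt"
    "strings"
)

// parseAttrs turns "Key1=Value1,Key2=Value2" into a key/value map.
func parseAttrs(raw string) (map[string]string, error) {
    attrs := make(map[string]string)
    if raw == "" {
        return attrs, nil
    }
    for _, kv := range strings.Split(raw, ",") {
        k, v, found := strings.Cut(kv, "=")
        if !found {
            return nil, fmt.Errorf("invalid attribute %q, expected Key=Value", kv)
        }
        attrs[k] = v
    }
    return attrs, nil
}

func main() {
    attrs, err := parseAttrs("FileName=report.txt,Index=1") // made-up values
    if err != nil {
        panic(err)
    }
    fmt.Println(attrs) // map[FileName:report.txt Index:1]
}

The practical difference, as far as I can tell, is that StringSlice also merges repeated --attributes flags and applies CSV-style quoting, while the manual split treats the flag as a single raw string.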

View file

@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take data from in the form offset:length") flags.String("range", "", "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.") flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc) flags.Bool(rawFlag, false, rawFlagDesc)
} }
@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID var cnr cid.ID
var obj oid.ID var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj) objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd) ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err) commonCmd.ExitOnErr(cmd, "", err)
@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool {
if ok { if ok {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto") toProto, _ := cmd.Flags().GetBool("proto")
if !toJSON && !toProto { if !(toJSON || toProto) {
cmd.PrintErrln("Object is erasure-encoded, ec information received.") cmd.PrintErrln("Object is erasure-encoded, ec information received.")
} }
printECInfo(cmd, errECInfo.ECInfo()) printECInfo(cmd, errECInfo.ECInfo())
@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
} }
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) { func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
vs, err := cmd.Flags().GetStringSlice("range") v := cmd.Flag("range").Value.String()
if len(vs) == 0 || err != nil { if len(v) == 0 {
return nil, err return nil, nil
} }
vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs)) rs := make([]objectSDK.Range, len(vs))
for i := range vs { for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep) before, after, found := strings.Cut(vs[i], rangeSep)
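The range flag handled above follows the same pattern: a single comma-separated string whose items are cut on ":" into offset/length pairs. A standalone, hypothetical version of that parsing (Range and parseRanges are illustrative names, not the SDK's types):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

type Range struct{ Offset, Length uint64 }

// parseRanges parses "off1:len1,off2:len2" into typed ranges.
func parseRanges(v string) ([]Range, error) {
    if v == "" {
        return nil, nil
    }
    parts := strings.Split(v, ",")
    rs := make([]Range, len(parts))
    for i, p := range parts {
        before, after, found := strings.Cut(p, ":")
        if !found {
            return nil, fmt.Errorf("invalid range %q, expected offset:length", p)
        }
        off, err := strconv.ParseUint(before, 10, 64)
        if err != nil {
            return nil, err
        }
        length, err := strconv.ParseUint(after, 10, 64)
        if err != nil {
            return nil, err
        }
        rs[i] = Range{Offset: off, Length: length}
    }
    return rs, nil
}

func main() {
    rs, _ := parseRanges("0:100,256:16") // made-up ranges
    fmt.Println(rs)                      // [{0 100} {256 16}]
}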

View file

@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs return xs
} }
func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr) readCID(cmd, cnr)
readOID(cmd, obj) readOID(cmd, obj)
@ -262,8 +262,13 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client
if _, ok := dst.(*internal.DeleteObjectPrm); ok { if _, ok := dst.(*internal.DeleteObjectPrm); ok {
common.PrintVerbose(cmd, "Collecting relatives of the removal object...") common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
objs = collectObjectRelatives(cmd, cli, cnr, *obj) rels := collectObjectRelatives(cmd, cli, cnr, *obj)
objs = append(objs, *obj)
if len(rels) == 0 {
objs = []oid.ID{*obj}
} else {
objs = append(rels, *obj)
}
} }
} }

View file

@ -2,19 +2,18 @@ package tree
import ( import (
"context" "context"
"crypto/tls"
"fmt" "fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
) )
@ -33,29 +32,23 @@ func _client() (tree.TreeServiceClient, error) {
return nil, err return nil, err
} }
host, isTLS, err := client.ParseURI(netAddr.URIAddr())
if err != nil {
return nil, err
}
creds := insecure.NewCredentials()
if isTLS {
creds = credentials.NewTLS(&tls.Config{})
}
opts := []grpc.DialOption{ opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor( grpc.WithChainUnaryInterceptor(
tracing.NewUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
), ),
grpc.WithChainStreamInterceptor( grpc.WithChainStreamInterceptor(
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(),
), ),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
grpc.WithTransportCredentials(creds),
} }
cc, err := grpc.NewClient(host, opts...) if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err return tree.NewTreeServiceClient(cc), err
} }
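The tree-client hunk above shows two ways of choosing gRPC transport credentials from the address: one parses the URI and builds TLS credentials explicitly, the other defaults to plaintext unless the target starts with "grpcs:". A rough, self-contained sketch of that pattern (the dial helper and the endpoint are made up, not the CLI's actual code):

package main

import (
    "crypto/tls"
    "fmt"
    "log"
    "net/url"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/credentials/insecure"
)

// dial picks TLS or plaintext credentials based on the URI scheme.
func dial(uri string) (*grpc.ClientConn, error) {
    u, err := url.Parse(uri)
    if err != nil {
        return nil, err
    }
    creds := insecure.NewCredentials()
    if u.Scheme == "grpcs" {
        creds = credentials.NewTLS(&tls.Config{})
    }
    // grpc.NewClient is lazy: no connection is made until the first RPC.
    return grpc.NewClient(u.Host, grpc.WithTransportCredentials(creds))
}

func main() {
    cc, err := dial("grpcs://example.com:8082") // assumed endpoint
    if err != nil {
        log.Fatal(err)
    }
    defer cc.Close()
    fmt.Println("target:", cc.Target())
}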

View file

@ -4,14 +4,11 @@ import (
"context" "context"
"os" "os"
"os/signal" "os/signal"
"strconv"
"syscall" "syscall"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/cast"
"github.com/spf13/viper" "github.com/spf13/viper"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -41,33 +38,13 @@ func reloadConfig() error {
} }
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled")) audit.Store(cfg.GetBool("audit.enabled"))
var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level")) err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil { if err != nil {
return err return err
} }
err = logPrm.SetTags(loggerTags()) logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
if err != nil {
return err
}
logger.UpdateLevelForTags(logPrm)
return nil return logPrm.Reload()
}
func loggerTags() [][]string {
var res [][]string
for i := 0; ; i++ {
var item []string
index := strconv.FormatInt(int64(i), 10)
names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
if names == "" {
break
}
item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
res = append(res, item)
}
return res
} }
func watchForSignal(ctx context.Context, cancel func()) { func watchForSignal(ctx context.Context, cancel func()) {

View file

@ -31,6 +31,7 @@ const (
var ( var (
wg = new(sync.WaitGroup) wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors intErr = make(chan error) // internal inner ring errors
logPrm = new(logger.Prm)
innerRing *innerring.Server innerRing *innerring.Server
pprofCmp *pprofComponent pprofCmp *pprofComponent
metricsCmp *httpComponent metricsCmp *httpComponent
@ -69,7 +70,6 @@ func main() {
metrics := irMetrics.NewInnerRingMetrics() metrics := irMetrics.NewInnerRingMetrics()
var logPrm logger.Prm
err = logPrm.SetLevelString( err = logPrm.SetLevelString(
cfg.GetString("logger.level"), cfg.GetString("logger.level"),
) )
@ -80,14 +80,10 @@ func main() {
exitErr(err) exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
err = logPrm.SetTags(loggerTags())
exitErr(err)
log, err = logger.NewLogger(logPrm) log, err = logger.NewLogger(logPrm)
exitErr(err) exitErr(err)
logger.UpdateLevelForTags(logPrm)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent() pprofCmp = newPprofComponent()

View file

@ -2,17 +2,13 @@ package meta
import ( import (
"context" "context"
"encoding/binary"
"errors"
"fmt" "fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview" "github.com/rivo/tview"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"go.etcd.io/bbolt"
) )
var tuiCMD = &cobra.Command{ var tuiCMD = &cobra.Command{
@ -31,11 +27,6 @@ Available search filters:
var initialPrompt string var initialPrompt string
var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
2: schema.MetabaseParserV2,
3: schema.MetabaseParserV3,
}
func init() { func init() {
common.AddComponentPathFlag(tuiCMD, &vPath) common.AddComponentPathFlag(tuiCMD, &vPath)
@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
} }
defer db.Close() defer db.Close()
schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
if !hasVersion {
return errors.New("couldn't detect schema version")
}
metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
if !ok {
return fmt.Errorf("unknown schema version %d", schemaVersion)
}
// Need if app was stopped with Ctrl-C. // Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context()) ctx, cancel := context.WithCancel(cmd.Context())
defer cancel() defer cancel()
app := tview.NewApplication() app := tview.NewApplication()
ui := tui.NewUI(ctx, app, db, metabaseParser, nil) ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID") _ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID") _ = ui.AddFilter("oid", tui.OIDParser, "OID")
@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui) app.SetRoot(ui, true).SetFocus(ui)
return app.Run() return app.Run()
} }
var (
shardInfoBucket = []byte{5}
versionRecord = []byte("version")
)
func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
err := db.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(shardInfoBucket)
if bkt == nil {
return nil
}
rec := bkt.Get(versionRecord)
if rec == nil {
return nil
}
version = binary.LittleEndian.Uint64(rec)
ok = true
return nil
})
if err != nil {
common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
}
return
}
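The removed lookupSchemaVersion above reads the metabase schema version as a little-endian uint64 stored under the "version" key of the shard-info bucket (prefix byte 5). A hypothetical round-trip against a throwaway database file shows the same bbolt calls (the path and the version value 3 are made up):

package main

import (
    "encoding/binary"
    "fmt"
    "log"

    "go.etcd.io/bbolt"
)

var (
    shardInfoBucket = []byte{5}
    versionRecord   = []byte("version")
)

func main() {
    db, err := bbolt.Open("/tmp/example-metabase.db", 0o600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Write the version record as a little-endian uint64.
    err = db.Update(func(tx *bbolt.Tx) error {
        bkt, err := tx.CreateBucketIfNotExists(shardInfoBucket)
        if err != nil {
            return err
        }
        buf := make([]byte, 8)
        binary.LittleEndian.PutUint64(buf, 3)
        return bkt.Put(versionRecord, buf)
    })
    if err != nil {
        log.Fatal(err)
    }

    // Read it back the same way the removed helper does.
    _ = db.View(func(tx *bbolt.Tx) error {
        if bkt := tx.Bucket(shardInfoBucket); bkt != nil {
            if rec := bkt.Get(versionRecord); rec != nil {
                fmt.Println("schema version:", binary.LittleEndian.Uint64(rec)) // 3
            }
        }
        return nil
    })
}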

View file

@ -3,8 +3,6 @@ package common
import ( import (
"errors" "errors"
"fmt" "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
) )
type FilterResult byte type FilterResult byte
@ -73,7 +71,11 @@ func (fp FallbackParser) ToParser() Parser {
func (p Parser) ToFallbackParser() FallbackParser { func (p Parser) ToFallbackParser() FallbackParser {
return func(key, value []byte) (SchemaEntry, Parser) { return func(key, value []byte) (SchemaEntry, Parser) {
entry, next, err := p(key, value) entry, next, err := p(key, value)
assert.NoError(err, "couldn't use that parser as a fallback parser") if err != nil {
panic(fmt.Errorf(
"couldn't use that parser as a fallback parser, it returned an error: %w", err,
))
}
return entry, next return entry, next
} }
} }

View file

@ -80,15 +80,10 @@ var (
}, },
) )
UserAttributeParserV2 = NewUserAttributeKeyBucketParser( UserAttributeParser = NewUserAttributeKeyBucketParser(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
) )
UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
)
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver, cidResolver: StrictResolver,
oidResolver: StrictResolver, oidResolver: StrictResolver,
@ -113,14 +108,4 @@ var (
cidResolver: StrictResolver, cidResolver: StrictResolver,
oidResolver: LenientResolver, oidResolver: LenientResolver,
}) })
ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
cidResolver: LenientResolver,
oidResolver: LenientResolver,
})
ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
) )

View file

@ -22,8 +22,6 @@ const (
Split Split
ContainerCounters ContainerCounters
ECInfo ECInfo
ExpirationEpochToObject
ObjectToExpirationEpoch
) )
var x = map[Prefix]string{ var x = map[Prefix]string{
@ -45,8 +43,6 @@ var x = map[Prefix]string{
Split: "Split", Split: "Split",
ContainerCounters: "Container Counters", ContainerCounters: "Container Counters",
ECInfo: "EC Info", ECInfo: "EC Info",
ExpirationEpochToObject: "Exp. Epoch to Object",
ObjectToExpirationEpoch: "Object to Exp. Epoch",
} }
func (p Prefix) String() string { func (p Prefix) String() string {

View file

@ -9,7 +9,7 @@ import (
func (b *PrefixBucket) String() string { func (b *PrefixBucket) String() string {
return common.FormatSimple( return common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
) )
} }
@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
return fmt.Sprintf( return fmt.Sprintf(
"%s CID %s", "%s CID %s",
common.FormatSimple( common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
), ),
common.FormatSimple(b.id.String(), tcell.ColorAqua), common.FormatSimple(b.id.String(), tcell.ColorAqua),
) )
@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
func (b *UserAttributeKeyBucket) String() string { func (b *UserAttributeKeyBucket) String() string {
return fmt.Sprintf("%s CID %s ATTR-KEY %s", return fmt.Sprintf("%s CID %s ATTR-KEY %s",
common.FormatSimple( common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
), ),
common.FormatSimple( common.FormatSimple(
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua, fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,

View file

@ -2,7 +2,6 @@ package buckets
import ( import (
"errors" "errors"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -62,7 +61,6 @@ var (
ErrInvalidKeyLength = errors.New("invalid key length") ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length") ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix") ErrInvalidPrefix = errors.New("invalid prefix")
ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
) )
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
} }
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
}
func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil { if value != nil {
return nil, nil, ErrNotBucket return nil, nil, ErrNotBucket
@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
return nil, nil, err return nil, nil, err
} }
b.key = string(key[33:]) b.key = string(key[33:])
if len(keys) != 0 && !slices.Contains(keys, b.key) {
return nil, nil, ErrUnexpectedAttributeKey
}
return &b, next, nil return &b, next, nil
} }
} }

View file

@ -5,30 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
) )
var MetabaseParserV3 = common.WithFallback( var MetabaseParser = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
buckets.ContainerVolumeParser,
buckets.LockedParser,
buckets.ShardInfoParser,
buckets.PrimaryParser,
buckets.LockersParser,
buckets.TombstoneParser,
buckets.SmallParser,
buckets.RootParser,
buckets.UserAttributeParserV3,
buckets.ParentParser,
buckets.SplitParser,
buckets.ContainerCountersParser,
buckets.ECInfoParser,
buckets.ExpirationEpochToObjectParser,
buckets.ObjectToExpirationEpochParser,
),
common.RawParser.ToFallbackParser(),
)
var MetabaseParserV2 = common.WithFallback(
common.Any( common.Any(
buckets.GraveyardParser, buckets.GraveyardParser,
buckets.GarbageParser, buckets.GarbageParser,
@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
buckets.SmallParser, buckets.SmallParser,
buckets.RootParser, buckets.RootParser,
buckets.OwnerParser, buckets.OwnerParser,
buckets.UserAttributeParserV2, buckets.UserAttributeParser,
buckets.PayloadHashParser, buckets.PayloadHashParser,
buckets.ParentParser, buckets.ParentParser,
buckets.SplitParser, buckets.SplitParser,

View file

@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string { func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r) return spew.Sdump(*r)
} }
func (r *ExpirationEpochToObjectRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ObjectToExpirationEpochRecord) DetailedString() string {
return spew.Sdump(*r)
}

View file

@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No return common.No
} }
} }
func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "cid":
id := val.(cid.ID)
return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}
func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}

View file

@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
} }
return &r, nil, nil return &r, nil, nil
} }
func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 72 {
return nil, nil, ErrInvalidKeyLength
}
var (
r ExpirationEpochToObjectRecord
err error
)
r.epoch = binary.BigEndian.Uint64(key[:8])
if err = r.cnt.Decode(key[8:40]); err != nil {
return nil, nil, err
}
if err = r.obj.Decode(key[40:]); err != nil {
return nil, nil, err
}
return &r, nil, nil
}
func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 32 {
return nil, nil, ErrInvalidKeyLength
}
if len(value) != 8 {
return nil, nil, ErrInvalidValueLength
}
var (
r ObjectToExpirationEpochRecord
err error
)
if err = r.obj.Decode(key); err != nil {
return nil, nil, err
}
r.epoch = binary.LittleEndian.Uint64(value)
return &r, nil, nil
}

View file

@ -2,7 +2,6 @@ package records
import ( import (
"fmt" "fmt"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"github.com/gdamore/tcell/v2" "github.com/gdamore/tcell/v2"
@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
len(r.ids), len(r.ids),
) )
} }
func (r *ExpirationEpochToObjectRecord) String() string {
return fmt.Sprintf(
"exp. epoch %s %c CID %s OID %s",
common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
)
}
func (r *ObjectToExpirationEpochRecord) String() string {
return fmt.Sprintf(
"OID %s %c exp. epoch %s",
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
)
}

View file

@ -79,15 +79,4 @@ type (
id oid.ID id oid.ID
ids []oid.ID ids []oid.ID
} }
ExpirationEpochToObjectRecord struct {
epoch uint64
cnt cid.ID
obj oid.ID
}
ObjectToExpirationEpochRecord struct {
obj oid.ID
epoch uint64
}
) )

View file

@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser,
r.addr.SetContainer(cnr) r.addr.SetContainer(cnr)
r.addr.SetObject(obj) r.addr.SetObject(obj)
r.data = value r.data = value[:]
return &r, nil, nil return &r, nil, nil
} }

View file

@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path path := parentBucket.Path
parser := parentBucket.NextParser parser := parentBucket.NextParser
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
if err != nil {
return err
}
for item := range buffer { for item := range buffer {
if item.err != nil { if item.err != nil {
@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
} }
bucket := item.val bucket := item.val
var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil { if err != nil {
return err return err
@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel() defer cancel()
// Check the current bucket's nested buckets if exist // Check the current bucket's nested buckets if exist
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range bucketsBuffer { for item := range bucketsBuffer {
if item.err != nil { if item.err != nil {
@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
} }
b := item.val b := item.val
var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil { if err != nil {
return false, err return false, err
@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
} }
// Check the current bucket's nested records if exist // Check the current bucket's nested records if exist
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range recordsBuffer { for item := range recordsBuffer {
if item.err != nil { if item.err != nil {
@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
} }
r := item.val r := item.val
var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value) r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil { if err != nil {
return false, err return false, err

View file

@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any]( func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T, filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] { ) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize) buffer := make(chan Item[T], bufferSize)
go func() { go func() {
@ -77,13 +77,13 @@ func load[T any](
} }
}() }()
return buffer return buffer, nil
} }
func LoadBuckets( func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] { ) (<-chan Item[*Bucket], error) {
buffer := load( buffer, err := load(
ctx, db, path, bufferSize, ctx, db, path, bufferSize,
func(_, value []byte) bool { func(_, value []byte) bool {
return value == nil return value == nil
@ -98,14 +98,17 @@ func LoadBuckets(
} }
}, },
) )
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer return buffer, nil
} }
func LoadRecords( func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] { ) (<-chan Item[*Record], error) {
buffer := load( buffer, err := load(
ctx, db, path, bufferSize, ctx, db, path, bufferSize,
func(_, value []byte) bool { func(_, value []byte) bool {
return value != nil return value != nil
@ -121,8 +124,11 @@ func LoadRecords(
} }
}, },
) )
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer return buffer, nil
} }
// HasBuckets checks if a bucket has nested buckets. It relies on assumption // HasBuckets checks if a bucket has nested buckets. It relies on assumption
@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
buffer := load( buffer, err := load(
ctx, db, path, 1, ctx, db, path, 1,
nil, nil,
func(_, value []byte) []byte { return value }, func(_, value []byte) []byte { return value },
) )
if err != nil {
return false, err
}
x, ok := <-buffer x, ok := <-buffer
if !ok { if !ok {
return false, nil return false, nil
} }
if x.err != nil { if x.err != nil {
return false, x.err return false, err
} }
if x.val != nil { if x.val != nil {
return false, nil return false, err
} }
return true, nil return true, nil
} }
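LoadBuckets and LoadRecords above are thin wrappers over the generic load, which streams results from a producer goroutine through a buffered channel of value-or-error items and is driven by a context so callers can stop it early. A stripped-down sketch of that pattern (produce and the in-memory src slice are illustrative, not the lens code):

package main

import (
    "context"
    "fmt"
)

type Item[T any] struct {
    val T
    err error
}

// produce streams src into a buffered channel until done or cancelled.
func produce[T any](ctx context.Context, bufferSize int, src []T) <-chan Item[T] {
    out := make(chan Item[T], bufferSize)
    go func() {
        defer close(out)
        for _, v := range src {
            select {
            case <-ctx.Done():
                return
            case out <- Item[T]{val: v}:
            }
        }
    }()
    return out
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    for item := range produce(ctx, 2, []string{"a", "b", "c"}) {
        if item.err != nil {
            break
        }
        fmt.Println(item.val)
    }
}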

View file

@ -1,8 +1,6 @@
package tui package tui
import ( import (
"slices"
"github.com/gdamore/tcell/v2" "github.com/gdamore/tcell/v2"
"github.com/rivo/tview" "github.com/rivo/tview"
) )
@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
// Used history data for search prompt, so just make that data recent. // Used history data for search prompt, so just make that data recent.
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] { if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1) f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
f.history = append(f.history, s) f.history = append(f.history, s)
} }
@ -53,17 +51,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
f.historyPointer++ f.historyPointer++
// Stop iterating over history. // Stop iterating over history.
if f.historyPointer == len(f.history) { if f.historyPointer == len(f.history) {
f.SetText(f.currentContent) f.InputField.SetText(f.currentContent)
return return
} }
f.SetText(f.history[f.historyPointer]) f.InputField.SetText(f.history[f.historyPointer])
case tcell.KeyUp: case tcell.KeyUp:
if len(f.history) == 0 { if len(f.history) == 0 {
return return
} }
// Start iterating over history. // Start iterating over history.
if f.historyPointer == len(f.history) { if f.historyPointer == len(f.history) {
f.currentContent = f.GetText() f.currentContent = f.InputField.GetText()
} }
// End of history. // End of history.
if f.historyPointer == 0 { if f.historyPointer == 0 {
@ -71,7 +69,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
} }
// Iterate to least recent prompts. // Iterate to least recent prompts.
f.historyPointer-- f.historyPointer--
f.SetText(f.history[f.historyPointer]) f.InputField.SetText(f.history[f.historyPointer])
default: default:
f.InputField.InputHandler()(event, func(tview.Primitive) {}) f.InputField.InputHandler()(event, func(tview.Primitive) {})
} }
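Side note on the history hunk above: the two deletion idioms it contrasts, append(s[:i], s[i+1:]...) and slices.Delete(s, i, i+1) from Go 1.21+, drop the same element. A tiny hypothetical illustration:

package main

import (
    "fmt"
    "slices"
)

func main() {
    a := []string{"x", "y", "z"}
    b := append([]string{}, a...) // copy so both idioms start from the same data

    a = append(a[:1], a[2:]...) // classic idiom: drop index 1
    b = slices.Delete(b, 1, 2)  // stdlib helper: drop the half-open range [1, 2)

    fmt.Println(a, b) // [x z] [x z]
}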

View file

@ -8,7 +8,6 @@ import (
"sync" "sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/gdamore/tcell/v2" "github.com/gdamore/tcell/v2"
"github.com/rivo/tview" "github.com/rivo/tview"
) )
@ -63,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx) ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize) v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() { go func() {
@ -71,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer { for item := range tempBuffer {
if item.err != nil { if item.err != nil {
v.ui.stopOnError(item.err) v.ui.stopOnError(err)
break break
} }
record := item.val record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil { if err != nil {
v.ui.stopOnError(err) v.ui.stopOnError(err)
@ -95,7 +96,9 @@ func (v *RecordsView) Mount(ctx context.Context) error {
} }
func (v *RecordsView) Unmount() { func (v *RecordsView) Unmount() {
assert.False(v.onUnmount == nil, "try to unmount not mounted component") if v.onUnmount == nil {
panic("try to unmount not mounted component")
}
v.onUnmount() v.onUnmount()
v.onUnmount = nil v.onUnmount = nil
} }

View file

@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
return return
} }
switch v := ui.mountedPage.(type) { switch ui.mountedPage.(type) {
case *BucketsView: case *BucketsView:
ui.moveNextPage(NewBucketsView(ui, res)) ui.moveNextPage(NewBucketsView(ui, res))
case *RecordsView: case *RecordsView:
bucket := v.bucket bucket := ui.mountedPage.(*RecordsView).bucket
ui.moveNextPage(NewRecordsView(ui, bucket, res)) ui.moveNextPage(NewRecordsView(ui, bucket, res))
} }
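The search-handler hunk above contrasts a bare type switch followed by a repeated type assertion with the bound form switch v := x.(type), where the case variable already has the concrete type. A small hypothetical example of the bound form:

package main

import "fmt"

func describe(x any) string {
    switch v := x.(type) {
    case int:
        return fmt.Sprintf("int %d", v) // v already has type int here, no extra assertion
    case string:
        return fmt.Sprintf("string %q", v)
    default:
        return "unknown"
    }
}

func main() {
    fmt.Println(describe(42), describe("hi"))
}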
@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
} }
ui.MouseHandler() ui.Box.MouseHandler()
} }
func (ui *UI) WithPrompt(prompt string) error { func (ui *UI) WithPrompt(prompt string) error {

View file

@ -14,7 +14,7 @@ import (
func initAPEManagerService(c *cfg) { func initAPEManagerService(c *cfg) {
contractStorage := ape_contract.NewProxyVerificationContractStorage( contractStorage := ape_contract.NewProxyVerificationContractStorage(
morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
c.key, c.shared.key,
c.cfgMorph.proxyScriptHash, c.cfgMorph.proxyScriptHash,
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)

View file

@ -1,30 +1,22 @@
package main package main
import ( import (
"bytes"
"cmp"
"context"
"slices"
"sync" "sync"
"sync/atomic"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/v2/expirable" "github.com/hashicorp/golang-lru/v2/expirable"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.uber.org/zap"
) )
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) type netValueReader[K any, V any] func(K) (V, error)
type valueWithError[V any] struct { type valueWithError[V any] struct {
v V v V
@ -57,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL. // updates the value from the network on cache miss or by TTL.
// //
// returned value should not be modified. // returned value should not be modified.
func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { func (c *ttlNetCache[K, V]) get(key K) (V, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -79,7 +71,7 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
return val.v, val.e return val.v, val.e
} }
v, err := c.netRdr(ctx, key) v, err := c.netRdr(key)
c.cache.Add(key, &valueWithError[V]{ c.cache.Add(key, &valueWithError[V]{
v: v, v: v,
@ -117,6 +109,55 @@ func (c *ttlNetCache[K, V]) remove(key K) {
hit = c.cache.Remove(key) hit = c.cache.Remove(key)
} }
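ttlNetCache above wraps a network reader so that values (and errors) fetched once are reused until they age past the TTL. A much-simplified, map-based sketch of the same idea (ttlCache, entry and newTTLCache are invented names; the real cache is LRU-bounded and records metrics):

package main

import (
    "fmt"
    "sync"
    "time"
)

type entry[V any] struct {
    v    V
    err  error
    when time.Time
}

type ttlCache[K comparable, V any] struct {
    mtx    sync.Mutex
    ttl    time.Duration
    items  map[K]entry[V]
    netRdr func(K) (V, error)
}

func newTTLCache[K comparable, V any](ttl time.Duration, netRdr func(K) (V, error)) *ttlCache[K, V] {
    return &ttlCache[K, V]{ttl: ttl, items: make(map[K]entry[V]), netRdr: netRdr}
}

func (c *ttlCache[K, V]) get(key K) (V, error) {
    c.mtx.Lock()
    defer c.mtx.Unlock()
    if e, ok := c.items[key]; ok && time.Since(e.when) < c.ttl {
        return e.v, e.err // cache hit, possibly a cached error
    }
    v, err := c.netRdr(key)
    c.items[key] = entry[V]{v: v, err: err, when: time.Now()}
    return v, err
}

func main() {
    calls := 0
    c := newTTLCache(time.Minute, func(k string) (string, error) {
        calls++
        return "value-for-" + k, nil
    })
    c.get("a")
    c.get("a")
    fmt.Println(calls) // 1: the second call is served from the cache
}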
// entity that provides LRU cache interface.
type lruNetCache struct {
cache *lru.Cache[uint64, *netmapSDK.NetMap]
netRdr netValueReader[uint64, *netmapSDK.NetMap]
metrics cacheMetrics
}
// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
fatalOnErr(err)
return &lruNetCache{
cache: cache,
netRdr: netRdr,
metrics: metrics,
}
}
// reads value by the key.
//
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
}()
val, ok := c.cache.Get(key)
if ok {
hit = true
return val, nil
}
val, err := c.netRdr(key)
if err != nil {
return nil, err
}
c.cache.Add(key, val)
return val, nil
}
// wrapper over TTL cache of values read from the network // wrapper over TTL cache of values read from the network
// that implements container storage. // that implements container storage.
type ttlContainerStorage struct { type ttlContainerStorage struct {
@ -125,11 +166,11 @@ type ttlContainerStorage struct {
} }
func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(ctx, id) return v.Get(id)
}, metrics.NewCacheMetrics("container")) }, metrics.NewCacheMetrics("container"))
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(ctx, id) return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info")) }, metrics.NewCacheMetrics("container_deletion_info"))
return ttlContainerStorage{ return ttlContainerStorage{
@ -147,245 +188,43 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache // Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache. // or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(ctx, cnr) return s.containerCache.get(cnr)
} }
func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(ctx, cnr) return s.delInfoCache.get(cnr)
} }
type lruNetmapSource struct { type lruNetmapSource struct {
netState netmap.State netState netmap.State
client rawSource cache *lruNetCache
cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
mtx sync.RWMutex
metrics cacheMetrics
log *logger.Logger
candidates atomic.Pointer[[]netmapSDK.NodeInfo]
} }
type rawSource interface { func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
}
func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
) netmap.Source {
const netmapCacheSize = 10 const netmapCacheSize = 10
cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil) lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
fatalOnErr(err) return v.GetNetMapByEpoch(key)
}, metrics.NewCacheMetrics("netmap"))
src := &lruNetmapSource{ return &lruNetmapSource{
netState: netState, netState: s,
client: client, cache: lruNetmapCache,
cache: cache,
log: log,
metrics: metrics.NewCacheMetrics("netmap"),
}
wg.Add(1)
go func() {
defer wg.Done()
src.updateCandidates(ctx, d)
}()
return src
}
// updateCandidates routine to merge netmap in cache with candidates list.
func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
timer := time.NewTimer(d)
defer timer.Stop()
for {
select {
case <-ctx.Done():
return
case <-timer.C:
newCandidates, err := s.client.GetCandidates(ctx)
if err != nil {
s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
timer.Reset(d)
break
}
if len(newCandidates) == 0 {
s.candidates.Store(&newCandidates)
timer.Reset(d)
break
}
slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
return cmp.Compare(n1.Hash(), n2.Hash())
})
// Check once state changed
v := s.candidates.Load()
if v == nil {
s.candidates.Store(&newCandidates)
s.mergeCacheWithCandidates(newCandidates)
timer.Reset(d)
break
}
ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
uint32(n1.Status()) != uint32(n2.Status()) ||
slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
return 1
}
ne1 := slices.Collect(n1.NetworkEndpoints())
ne2 := slices.Collect(n2.NetworkEndpoints())
return slices.Compare(ne1, ne2)
})
if ret != 0 {
s.candidates.Store(&newCandidates)
s.mergeCacheWithCandidates(newCandidates)
}
timer.Reset(d)
}
} }
} }
func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) { func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
s.mtx.Lock() return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
tmp := s.cache.Values()
s.mtx.Unlock()
for _, pointer := range tmp {
nm := pointer.Load()
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
nm = nm.Clone()
mergeNetmapWithCandidates(updates, nm)
pointer.Store(nm)
}
}
} }
// reads value by the key. func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
// return s.getNetMapByEpoch(epoch)
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
}()
s.mtx.RLock()
val, ok := s.cache.Get(key)
s.mtx.RUnlock()
if ok {
hit = true
return val.Load(), nil
} }
s.mtx.Lock() func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
defer s.mtx.Unlock() val, err := s.cache.get(epoch)
val, ok = s.cache.Get(key)
if ok {
hit = true
return val.Load(), nil
}
nm, err := s.client.GetNetMapByEpoch(ctx, key)
if err != nil {
return nil, err
}
v := s.candidates.Load()
if v != nil {
updates := getNetMapNodesToUpdate(nm, *v)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
}
p := atomic.Pointer[netmapSDK.NetMap]{}
p.Store(nm)
s.cache.Add(key, &p)
return nm, nil
}
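The cache above keeps *atomic.Pointer[netmapSDK.NetMap] entries so that readers always observe a complete netmap while the candidate-merging routine swaps in updated copies. A minimal hypothetical illustration of that swap pattern:

package main

import (
    "fmt"
    "sync/atomic"
)

type snapshot struct{ epoch uint64 }

func main() {
    var p atomic.Pointer[snapshot]
    p.Store(&snapshot{epoch: 1})

    // A reader always gets a complete snapshot, never a half-updated one.
    fmt.Println(p.Load().epoch) // 1

    // A writer builds a new value and swaps it in atomically.
    next := *p.Load()
    next.epoch = 2
    p.Store(&next)
    fmt.Println(p.Load().epoch) // 2
}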
// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
for _, v := range updates {
if v.status != netmapSDK.UnspecifiedState {
nm.Nodes()[v.netmapIndex].SetStatus(v.status)
}
if v.externalAddresses != nil {
nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
}
if v.endpoints != nil {
nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
}
}
}
type nodeToUpdate struct {
netmapIndex int
status netmapSDK.NodeState
externalAddresses []string
endpoints []string
}
// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
var res []nodeToUpdate
for i := range nm.Nodes() {
for _, cnd := range candidates {
if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
var tmp nodeToUpdate
var update bool
if cnd.Status() != nm.Nodes()[i].Status() &&
(cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
update = true
tmp.status = cnd.Status()
}
externalAddresses := cnd.ExternalAddresses()
if externalAddresses != nil &&
slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
update = true
tmp.externalAddresses = externalAddresses
}
nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints())
candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints())
if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
update = true
tmp.endpoints = candidateEndpoints
}
if update {
tmp.netmapIndex = i
res = append(res, tmp)
}
break
}
}
}
return res
}
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
}
func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, epoch)
}
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.get(ctx, epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -393,7 +232,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil return val, nil
} }
func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil return s.netState.CurrentEpoch(), nil
} }
@ -401,10 +240,7 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte] *ttlNetCache[struct{}, [][]byte]
} }
func newCachedIRFetcher(f interface { func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
InnerRingKeys(ctx context.Context) ([][]byte, error)
},
) cachedIRFetcher {
const ( const (
irFetcherCacheSize = 1 // we intend to store only one value irFetcherCacheSize = 1 // we intend to store only one value
@ -418,8 +254,8 @@ func newCachedIRFetcher(f interface {
) )
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(ctx context.Context, _ struct{}) ([][]byte, error) { func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys(ctx) return f.InnerRingKeys()
}, metrics.NewCacheMetrics("ir_keys"), }, metrics.NewCacheMetrics("ir_keys"),
) )
@ -429,8 +265,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates // the cache or expired, then it returns keys from side chain and updates
// the cache. // the cache.
func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(ctx, struct{}{}) val, err := f.get(struct{}{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -453,7 +289,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
} }
} }
func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30 const ttl = time.Second * 30
hit := false hit := false
@ -475,7 +311,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
c.mtx.Lock() c.mtx.Lock()
size = c.lastSize size = c.lastSize
if !c.lastUpdated.After(prevUpdated) { if !c.lastUpdated.After(prevUpdated) {
size = c.src.MaxObjectSize(ctx) size = c.src.MaxObjectSize()
c.lastSize = size c.lastSize = size
c.lastUpdated = time.Now() c.lastUpdated = time.Now()
} }

View file

@ -1,13 +1,10 @@
package main package main
import ( import (
"context"
"errors" "errors"
"sync"
"testing" "testing"
"time" "time"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -20,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, ti, val) require.Equal(t, ti, val)
}) })
@ -29,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration) time.Sleep(2 * ttlDuration)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
@ -38,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
cache.remove(key) cache.remove(key)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
t.Run("Test Cache Error", func(t *testing.T) { t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error")) cache.set("error", time.Now(), errors.New("mock error"))
_, err := cache.get(context.Background(), "error") _, err := cache.get("error")
require.Error(t, err) require.Error(t, err)
require.Equal(t, "mock error", err.Error()) require.Equal(t, "mock error", err.Error())
}) })
} }
func testNetValueReader(_ context.Context, key string) (time.Time, error) { func testNetValueReader(key string) (time.Time, error) {
if key == "error" { if key == "error" {
return time.Now(), errors.New("mock error") return time.Now(), errors.New("mock error")
} }
@ -61,75 +58,3 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) {
type noopCacheMetricts struct{} type noopCacheMetricts struct{}
func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
type rawSrc struct{}
func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
node0 := netmapSDK.NodeInfo{}
node0.SetPublicKey([]byte{byte(1)})
node0.SetStatus(netmapSDK.Online)
node0.SetExternalAddresses("1", "0")
node0.SetNetworkEndpoints("1", "0")
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey([]byte{byte(1)})
node1.SetStatus(netmapSDK.Online)
node1.SetExternalAddresses("1", "0")
node1.SetNetworkEndpoints("1", "0")
return []netmapSDK.NodeInfo{node0, node1}, nil
}
func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
nm := netmapSDK.NetMap{}
nm.SetEpoch(1)
node0 := netmapSDK.NodeInfo{}
node0.SetPublicKey([]byte{byte(1)})
node0.SetStatus(netmapSDK.Maintenance)
node0.SetExternalAddresses("0")
node0.SetNetworkEndpoints("0")
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey([]byte{byte(1)})
node1.SetStatus(netmapSDK.Maintenance)
node1.SetExternalAddresses("0")
node1.SetNetworkEndpoints("0")
nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
return &nm, nil
}
type st struct{}
func (s *st) CurrentEpoch() uint64 {
return 1
}
func TestNetmapStorage(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
wg := sync.WaitGroup{}
cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
nm, err := cache.GetNetMapByEpoch(ctx, 1)
require.NoError(t, err)
require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
require.Eventually(t, func() bool {
nm, err := cache.GetNetMapByEpoch(ctx, 1)
require.NoError(t, err)
for _, node := range nm.Nodes() {
if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
node.NumberOfNetworkEndpoints() == 2) {
return false
}
}
return true
}, time.Second*5, time.Millisecond*10)
cancel()
wg.Wait()
}
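TestNetmapStorage above drives a cache that answers immediately from its current snapshot and refreshes it in the background, polling with require.Eventually until the fresh netmap is observed and then cancelling the context and waiting on the WaitGroup. A minimal, hypothetical sketch of that background-refresh shape (not the repository's newCachedNetmapStorage):

package cache

import (
	"context"
	"sync"
	"time"
)

// refresher serves the current snapshot immediately while a background
// goroutine refreshes it until the context is cancelled, then signals the
// WaitGroup so shutdown can block on it.
type refresher[T any] struct {
	mu   sync.RWMutex
	cur  T
	load func(context.Context) (T, error)
}

func newRefresher[T any](ctx context.Context, wg *sync.WaitGroup, every time.Duration, load func(context.Context) (T, error)) *refresher[T] {
	r := &refresher[T]{load: load}
	wg.Add(1)
	go func() {
		defer wg.Done()
		t := time.NewTicker(every)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				if v, err := r.load(ctx); err == nil {
					r.mu.Lock()
					r.cur = v
					r.mu.Unlock()
				}
			}
		}
	}()
	return r
}

func (r *refresher[T]) Get() T {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.cur
}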



@ -33,14 +33,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@ -71,7 +69,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -109,8 +106,6 @@ type applicationConfiguration struct {
level string level string
destination string destination string
timestamp bool timestamp bool
options []zap.Option
tags [][]string
} }
ObjectCfg struct { ObjectCfg struct {
@ -120,6 +115,7 @@ type applicationConfiguration struct {
EngineCfg struct { EngineCfg struct {
errorThreshold uint32 errorThreshold uint32
shardPoolSize uint32
shards []shardCfg shards []shardCfg
lowMem bool lowMem bool
} }
@ -129,13 +125,15 @@ type applicationConfiguration struct {
} }
type shardCfg struct { type shardCfg struct {
compression compression.Config compress bool
estimateCompressibility bool
estimateCompressibilityThreshold float64
smallSizeObjectLimit uint64 smallSizeObjectLimit uint64
uncompressableContentType []string
refillMetabase bool refillMetabase bool
refillMetabaseWorkersCount int refillMetabaseWorkersCount int
mode shardmode.Mode mode shardmode.Mode
limiter qos.Limiter
metaCfg struct { metaCfg struct {
path string path string
@ -232,71 +230,62 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
var opts []zap.Option
if loggerconfig.ToLokiConfig(c).Enabled {
opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
return lokiCore
})}
}
a.LoggerCfg.options = opts
a.LoggerCfg.tags = loggerconfig.Tags(c)
// Object // Object
a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
locodeDBPath := nodeconfig.LocodeDBPath(c) var pm []placement.Metric
parser, err := placement.NewMetricsParser(locodeDBPath) for _, raw := range objectconfig.Get(c).Priority() {
m, err := placement.ParseMetric(raw)
if err != nil { if err != nil {
return fmt.Errorf("metrics parser creation: %w", err) return err
} }
m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) pm = append(pm, m)
if err != nil {
return fmt.Errorf("parse metrics: %w", err)
} }
a.ObjectCfg.priorityMetrics = m a.ObjectCfg.priorityMetrics = pm
// Storage Engine // Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
} }
func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
var target shardCfg var newConfig shardCfg
target.refillMetabase = source.RefillMetabase() newConfig.refillMetabase = oldConfig.RefillMetabase()
target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
target.mode = source.Mode() newConfig.mode = oldConfig.Mode()
target.compression = source.Compression() newConfig.compress = oldConfig.Compress()
target.smallSizeObjectLimit = source.SmallSizeLimit() newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
a.setShardWriteCacheConfig(&target, source) a.setShardWriteCacheConfig(&newConfig, oldConfig)
a.setShardPiloramaConfig(c, &target, source) a.setShardPiloramaConfig(c, &newConfig, oldConfig)
if err := a.setShardStorageConfig(&target, source); err != nil { if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
return err return err
} }
a.setMetabaseConfig(&target, source) a.setMetabaseConfig(&newConfig, oldConfig)
a.setGCConfig(&target, source) a.setGCConfig(&newConfig, oldConfig)
if err := a.setLimiter(&target, source); err != nil {
return err
}
a.EngineCfg.shards = append(a.EngineCfg.shards, target) a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
return nil return nil
} }
func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
writeCacheCfg := source.WriteCache() writeCacheCfg := oldConfig.WriteCache()
if writeCacheCfg.Enabled() { if writeCacheCfg.Enabled() {
wc := &target.writecacheCfg wc := &newConfig.writecacheCfg
wc.enabled = true wc.enabled = true
wc.path = writeCacheCfg.Path() wc.path = writeCacheCfg.Path()
@ -309,10 +298,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
} }
} }
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") { if config.BoolSafe(c.Sub("tree"), "enabled") {
piloramaCfg := source.Pilorama() piloramaCfg := oldConfig.Pilorama()
pr := &target.piloramaCfg pr := &newConfig.piloramaCfg
pr.enabled = true pr.enabled = true
pr.path = piloramaCfg.Path() pr.path = piloramaCfg.Path()
@ -323,8 +312,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
} }
} }
func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
blobStorCfg := source.BlobStor() blobStorCfg := oldConfig.BlobStor()
storagesCfg := blobStorCfg.Storages() storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg)) ss := make([]subStorageCfg, 0, len(storagesCfg))
@ -358,13 +347,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
ss = append(ss, sCfg) ss = append(ss, sCfg)
} }
target.subStorages = ss newConfig.subStorages = ss
return nil return nil
} }
func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
metabaseCfg := source.Metabase() metabaseCfg := oldConfig.Metabase()
m := &target.metaCfg m := &newConfig.metaCfg
m.path = metabaseCfg.Path() m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm() m.perm = metabaseCfg.BoltDB().Perm()
@ -372,22 +361,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
} }
func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
gcCfg := source.GC() gcCfg := oldConfig.GC()
target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
limitsConfig := source.Limits().ToConfig()
limiter, err := qos.NewLimiter(limitsConfig)
if err != nil {
return err
}
target.limiter = limiter
return nil
} }
// internals contains application-specific internals that are created // internals contains application-specific internals that are created
@ -477,6 +456,7 @@ type shared struct {
// dynamicConfiguration stores parameters of the // dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations. // components that supports runtime reconfigurations.
type dynamicConfiguration struct { type dynamicConfiguration struct {
logger *logger.Prm
pprof *httpComponent pprof *httpComponent
metrics *httpComponent metrics *httpComponent
} }
@ -513,7 +493,6 @@ type cfg struct {
cfgNetmap cfgNetmap cfgNetmap cfgNetmap
cfgControlService cfgControlService cfgControlService cfgControlService
cfgObject cfgObject cfgObject cfgObject
cfgQoSService cfgQoSService
} }
// ReadCurrentNetMap reads network map which has been cached at the // ReadCurrentNetMap reads network map which has been cached at the
@ -548,8 +527,6 @@ type cfgGRPC struct {
maxChunkSize uint64 maxChunkSize uint64
maxAddrAmount uint64 maxAddrAmount uint64
reconnectTimeout time.Duration reconnectTimeout time.Duration
limiter atomic.Pointer[limiting.SemaphoreLimiter]
} }
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@ -686,6 +663,10 @@ type cfgAccessPolicyEngine struct {
} }
type cfgObjectRoutines struct { type cfgObjectRoutines struct {
putRemote *ants.Pool
putLocal *ants.Pool
replication *ants.Pool replication *ants.Pool
} }
@ -722,7 +703,12 @@ func initCfg(appCfg *config.Config) *cfg {
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm) log, err := logger.NewLogger(logPrm)
fatalOnErr(err) fatalOnErr(err)
logger.UpdateLevelForTags(logPrm) if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
}
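One side of this hunk attaches a Loki core through zap.WrapCore on the repository's logger wrapper; the other builds the same option up front. Purely as an illustration of the underlying zap mechanism (lokicore and the wrapper type are not reproduced here, and the tee below is only a stand-in for a real secondary sink):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	log, _ := zap.NewProduction()
	// On a plain *zap.Logger, WithOptions returns a copy, so the result is
	// reassigned here; NewTee(core, core) stands in for attaching a Loki core.
	log = log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
		return zapcore.NewTee(core, core)
	}))
	log.Info("routed through the wrapped core")
}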
c.internals = initInternals(appCfg, log) c.internals = initInternals(appCfg, log)
@ -866,14 +852,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
} }
} }
func initCfgGRPC() (cfg cfgGRPC) { func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
cfg.maxChunkSize = maxChunkSize return cfgGRPC{
cfg.maxAddrAmount = maxAddrAmount maxChunkSize: maxChunkSize,
maxAddrAmount: maxAddrAmount,
return }
} }
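For a sense of scale behind the 25%/75% split and the address estimate in the comments above: assuming, purely for illustration, a maxMsgSize of 4 MiB (the actual constant is outside this diff) and the ~72-byte address size from the comment, the derived limits come out as follows:

package main

import "fmt"

func main() {
	const (
		maxMsgSize  = 4 << 20 // assumed 4 MiB message limit, not taken from the diff
		addressSize = 72      // "each address is about 72 bytes" per the comment above
	)
	maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 75% of the message left for payload
	maxAddrAmount := maxChunkSize / addressSize
	fmt.Println(maxChunkSize, maxAddrAmount) // 3145728 43690
}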
func initCfgObject(appCfg *config.Config) cfgObject { func initCfgObject(appCfg *config.Config) cfgObject {
@ -890,8 +876,9 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option var opts []engine.Option
opts = append(opts, opts = append(opts,
engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold), engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log.WithTag(logger.TagEngine)), engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
) )
@ -928,8 +915,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxCacheSize(wcRead.sizeLimit), writecache.WithMaxCacheSize(wcRead.sizeLimit),
writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync), writecache.WithNoSync(wcRead.noSync),
writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), writecache.WithLogger(c.log),
writecache.WithQoSLimiter(shCfg.limiter),
) )
} }
return writeCacheOpts return writeCacheOpts
@ -968,8 +954,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval),
blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), blobovniczatree.WithLogger(c.log),
blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)),
blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),
} }
@ -992,7 +977,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
fstree.WithPerm(sRead.perm), fstree.WithPerm(sRead.perm),
fstree.WithDepth(sRead.depth), fstree.WithDepth(sRead.depth),
fstree.WithNoSync(sRead.noSync), fstree.WithNoSync(sRead.noSync),
fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), fstree.WithLogger(c.log),
} }
if c.metricsCollector != nil { if c.metricsCollector != nil {
fstreeOpts = append(fstreeOpts, fstreeOpts = append(fstreeOpts,
@ -1022,9 +1007,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
ss := c.getSubstorageOpts(ctx, shCfg) ss := c.getSubstorageOpts(ctx, shCfg)
blobstoreOpts := []blobstor.Option{ blobstoreOpts := []blobstor.Option{
blobstor.WithCompression(shCfg.compression), blobstor.WithCompressObjects(shCfg.compress),
blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility),
blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold),
blobstor.WithStorages(ss), blobstor.WithStorages(ss),
blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)), blobstor.WithLogger(c.log),
} }
if c.metricsCollector != nil { if c.metricsCollector != nil {
blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore()))) blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
@ -1043,13 +1031,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
} }
if c.metricsCollector != nil { if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics()))) mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
} }
var sh shardOptsWithID var sh shardOptsWithID
sh.configID = shCfg.id() sh.configID = shCfg.id()
sh.shOpts = []shard.Option{ sh.shOpts = []shard.Option{
shard.WithLogger(c.log.WithTag(logger.TagShard)), shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase), shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount), shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
shard.WithMode(shCfg.mode), shard.WithMode(shCfg.mode),
@ -1068,33 +1055,30 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return pool return pool
}), }),
shard.WithLimiter(shCfg.limiter),
} }
return sh return sh
} }
func (c *cfg) loggerPrm() (logger.Prm, error) { func (c *cfg) loggerPrm() (*logger.Prm, error) {
var prm logger.Prm // check if it has been inited before
// (re)init read configuration if c.dynamicConfiguration.logger == nil {
err := prm.SetLevelString(c.LoggerCfg.level) c.dynamicConfiguration.logger = new(logger.Prm)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
}
err = prm.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
}
prm.PrependTimestamp = c.LoggerCfg.timestamp
prm.Options = c.LoggerCfg.options
err = prm.SetTags(c.LoggerCfg.tags)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
} }
return prm, nil // (re)init read configuration
err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log level format: " + c.LoggerCfg.level)
}
err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger, nil
} }
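Both variants of loggerPrm exist so the log level can be re-read at runtime. Independently of the repository's logger.Prm type, the standard zap building block for a hot-reloadable level is an AtomicLevel; a minimal sketch of that reload path:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	level := zap.NewAtomicLevelAt(zapcore.InfoLevel)
	log := zap.New(zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		level,
	))

	log.Debug("dropped while the level is info")
	level.SetLevel(zapcore.DebugLevel) // e.g. after re-reading "logger.level" from config
	log.Debug("visible after the reload")
}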
func (c *cfg) LocalAddress() network.AddressGroup { func (c *cfg) LocalAddress() network.AddressGroup {
@ -1163,7 +1147,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg) cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
} }
@ -1182,7 +1166,21 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error var err error
optNonBlocking := ants.WithNonblocking(true)
putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
fatalOnErr(err)
putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
fatalOnErr(err)
replicatorPoolSize := replicatorconfig.PoolSize(cfg) replicatorPoolSize := replicatorconfig.PoolSize(cfg)
if replicatorPoolSize <= 0 {
replicatorPoolSize = putRemoteCapacity
}
pool.replication, err = ants.NewPool(replicatorPoolSize) pool.replication, err = ants.NewPool(replicatorPoolSize)
fatalOnErr(err) fatalOnErr(err)
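One side of this hunk creates the remote and local put pools with ants and WithNonblocking(true). For reference, a non-blocking ants pool rejects submissions with ants.ErrPoolOverload once every worker is busy, so callers have to handle that error; a standalone sketch with made-up capacities:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	pool, err := ants.NewPool(2, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	for i := 0; i < 4; i++ {
		err := pool.Submit(func() { time.Sleep(100 * time.Millisecond) })
		if errors.Is(err, ants.ErrPoolOverload) {
			fmt.Println("pool is full, task rejected") // caller decides: drop, retry, or fall back
		}
	}
}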
@ -1208,7 +1206,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
} }
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(ctx, epoch) ni, err := c.netmapLocalNodeState(epoch)
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch), zap.Uint64("epoch", epoch),
@ -1334,7 +1332,15 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support // all the components are expected to support
// Logger's dynamic reconfiguration approach // Logger's dynamic reconfiguration approach
components := c.getComponents(ctx) // Logger
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm)
// Object // Object
c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@ -1372,17 +1378,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
} }
func (c *cfg) getComponents(ctx context.Context) []dCmp { func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
var components []dCmp var components []dCmp
components = append(components, dCmp{"logger", func() error { components = append(components, dCmp{"logger", logPrm.Reload})
prm, err := c.loggerPrm()
if err != nil {
return err
}
logger.UpdateLevelForTags(prm)
return nil
}})
components = append(components, dCmp{"runtime", func() error { components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(ctx, c) setRuntimeParameters(ctx, c)
return nil return nil
@ -1415,13 +1414,17 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
} }
components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
return components return components
} }
func (c *cfg) reloadPools() error { func (c *cfg) reloadPools() error {
newSize := replicatorconfig.PoolSize(c.appCfg) newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
newSize = replicatorconfig.PoolSize(c.appCfg)
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
return nil return nil


@ -1,7 +1,6 @@
package config package config
import ( import (
"slices"
"strings" "strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used // It supports only one level of nesting and is intended to be used
// to provide default values. // to provide default values.
func (x *Config) SetDefault(from *Config) { func (x *Config) SetDefault(from *Config) {
x.defaultPath = slices.Clone(from.path) x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)
} }
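Both sides of this hunk copy from.path before storing it; the practical difference is only that slices.Clone (standard library since Go 1.21) preserves nil-ness, while make plus copy always yields a non-nil slice:

package main

import (
	"fmt"
	"slices"
)

func main() {
	var src []string // nil

	viaClone := slices.Clone(src)
	viaCopy := make([]string, len(src))
	copy(viaCopy, src)

	fmt.Println(viaClone == nil, viaCopy == nil) // true false
}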


@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) { func TestConfigDir(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
cfgFileName := path.Join(dir, "cfg_01.yml") cfgFileName0 := path.Join(dir, "cfg_00.json")
cfgFileName1 := path.Join(dir, "cfg_01.yml")
require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "") c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
} }


@ -11,6 +11,10 @@ import (
const ( const (
subsection = "storage" subsection = "storage"
// ShardPoolSizeDefault is the default value of the per-shard routine pool size
// used to process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
) )
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found. // ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil return nil
} }
// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
//
// Returns ShardPoolSizeDefault if the value is not a positive number.
func ShardPoolSize(c *config.Config) uint32 {
v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
if v > 0 {
return v
}
return ShardPoolSizeDefault
}
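ShardPoolSize above follows the usual "positive value or default" guard used throughout this config package; restated as a small generic helper (hypothetical, not part of the repository):

package config

// positiveOrDefault returns v when it is positive and def otherwise —
// the same guard ShardPoolSize applies to "shard_pool_size".
func positiveOrDefault[T int | int64 | uint32 | uint64](v, def T) T {
	if v > 0 {
		return v
	}
	return def
}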
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
// //
// Returns 0 if the value is missing. // Returns 0 if the value is missing.


@ -14,8 +14,6 @@ import (
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -55,6 +53,7 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled) require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
}) })
@ -64,6 +63,7 @@ func TestEngineSection(t *testing.T) {
num := 0 num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() { defer func() {
@ -76,7 +76,6 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages() ss := blob.Storages()
pl := sc.Pilorama() pl := sc.Pilorama()
gc := sc.GC() gc := sc.GC()
limits := sc.Limits()
switch num { switch num {
case 0: case 0:
@ -101,11 +100,10 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 100, meta.BoltDB().MaxBatchSize())
require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay())
require.Equal(t, true, sc.Compression().Enabled) require.Equal(t, true, sc.Compress())
require.Equal(t, compression.LevelFastest, sc.Compression().Level) require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes())
require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) require.Equal(t, true, sc.EstimateCompressibility())
require.Equal(t, true, sc.Compression().EstimateCompressibility) require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold())
require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold)
require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss)) require.Equal(t, 2, len(ss))
@ -136,86 +134,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
readLimits := limits.ToConfig().Read
writeLimits := limits.ToConfig().Write
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
require.ElementsMatch(t, readLimits.Tags,
[]qos.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(20),
ReservedOps: toPtr(1000),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(70),
ReservedOps: toPtr(10000),
},
{
Tag: "background",
Weight: toPtr(5),
LimitOps: toPtr(10000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
Prohibited: true,
},
{
Tag: "treesync",
Weight: toPtr(5),
LimitOps: toPtr(25),
},
})
require.ElementsMatch(t, writeLimits.Tags,
[]qos.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(200),
ReservedOps: toPtr(100),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(700),
ReservedOps: toPtr(1000),
},
{
Tag: "background",
Weight: toPtr(50),
LimitOps: toPtr(1000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
{
Tag: "policer",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
{
Tag: "treesync",
Weight: toPtr(50),
LimitOps: toPtr(100),
},
})
case 1: case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm()) require.Equal(t, fs.FileMode(0o644), pl.Perm())
@ -238,9 +156,8 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) require.Equal(t, 200, meta.BoltDB().MaxBatchSize())
require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay())
require.Equal(t, false, sc.Compression().Enabled) require.Equal(t, false, sc.Compress())
require.Equal(t, compression.LevelDefault, sc.Compression().Level) require.Equal(t, []string(nil), sc.UncompressableContentTypes())
require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes)
require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.EqualValues(t, 102400, sc.SmallSizeLimit())
require.Equal(t, 2, len(ss)) require.Equal(t, 2, len(ss))
@ -271,17 +188,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
readLimits := limits.ToConfig().Read
writeLimits := limits.ToConfig().Write
require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
require.Equal(t, 0, len(readLimits.Tags))
require.Equal(t, 0, len(writeLimits.Tags))
} }
return nil return nil
}) })
@ -295,7 +201,3 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest) configtest.ForEnvFileType(t, path, fileConfigTest)
}) })
} }
func toPtr(v float64) *float64 {
return &v
}


@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration { func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay") d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0) if d < 0 {
d = 0
}
return d
} }
// MaxBatchSize returns the value of "max_batch_size" config parameter. // MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int { func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0) if s < 0 {
s = 0
}
return s
} }
// NoSync returns the value of "no_sync" config parameter. // NoSync returns the value of "no_sync" config parameter.
@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number. // Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int { func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
return max(s, 0) if s < 0 {
s = 0
}
return s
} }
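One side of these hunks collapses the negative-value guards into the max builtin available since Go 1.21, which works for any ordered type, including time.Duration:

package clamp

import "time"

// clampNonNegative is the same guard expressed with the Go 1.21 max builtin.
func clampNonNegative(d time.Duration) time.Duration {
	return max(d, 0)
}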


@ -4,11 +4,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor" blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
) )
@ -28,27 +26,42 @@ func From(c *config.Config) *Config {
return (*Config)(c) return (*Config)(c)
} }
func (x *Config) Compression() compression.Config { // Compress returns the value of "compress" config parameter.
cc := (*config.Config)(x).Sub("compression") //
if cc == nil { // Returns false if the value is not a valid bool.
return compression.Config{} func (x *Config) Compress() bool {
return config.BoolSafe(
(*config.Config)(x),
"compress",
)
} }
return compression.Config{
Enabled: config.BoolSafe(cc, "enabled"), // UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter.
UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), //
Level: compression.Level(config.StringSafe(cc, "level")), // Returns nil if the value is missing or invalid.
EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), func (x *Config) UncompressableContentTypes() []string {
EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), return config.StringSliceSafe(
(*config.Config)(x),
"compression_exclude_content_types")
} }
// EstimateCompressibility returns the value of "estimate_compressibility" config parameter.
//
// Returns false if the value is not a valid bool.
func (x *Config) EstimateCompressibility() bool {
return config.BoolSafe(
(*config.Config)(x),
"compression_estimate_compressibility",
)
} }
// EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter.
// //
// Returns EstimateCompressibilityThresholdDefault if the value is not defined, not a valid float, or not in range [0.0; 1.0]. // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not a valid float, or not in range [0.0; 1.0].
func estimateCompressibilityThreshold(c *config.Config) float64 { func (x *Config) EstimateCompressibilityThreshold() float64 {
v := config.FloatOrDefault( v := config.FloatOrDefault(
c, (*config.Config)(x),
"estimate_compressibility_threshold", "compression_estimate_compressibility_threshold",
EstimateCompressibilityThresholdDefault) EstimateCompressibilityThresholdDefault)
if v < 0.0 || v > 1.0 { if v < 0.0 || v > 1.0 {
return EstimateCompressibilityThresholdDefault return EstimateCompressibilityThresholdDefault
@ -112,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config {
) )
} }
// Limits returns "limits" subsection as a limitsconfig.Config.
func (x *Config) Limits() *limitsconfig.Config {
return limitsconfig.From(
(*config.Config)(x).
Sub("limits"),
)
}
// RefillMetabase returns the value of "resync_metabase" config parameter. // RefillMetabase returns the value of "resync_metabase" config parameter.
// //
// Returns false if the value is not a valid bool. // Returns false if the value is not a valid bool.
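The two sides of this file read compression settings from differently shaped configuration: a nested compression subsection with enabled/exclude_content_types/level keys versus flat compress* keys at the shard level. Using viper purely as a stand-in key/value store (the repository's config wrapper is not reproduced here), the difference in addressing looks like:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()

	// nested subsection style
	v.Set("compression.enabled", true)
	v.Set("compression.exclude_content_types", []string{"audio/*", "video/*"})

	// flat key style
	v.Set("compress", true)
	v.Set("compression_exclude_content_types", []string{"audio/*", "video/*"})

	fmt.Println(v.GetBool("compression.enabled"), v.GetBool("compress"))
	fmt.Println(v.GetStringSlice("compression_exclude_content_types"))
}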

Some files were not shown because too many files have changed in this diff.