Compare commits

1 commit

Commit ae64e09ead
[#1294] docs: Fix description of shard switching mode
All checks were successful:
DCO action / DCO (pull_request) Successful in 43s
Vulncheck / Vulncheck (pull_request) Successful in 1m35s
Pre-commit hooks / Pre-commit (pull_request) Successful in 1m52s
Build / Build Components (pull_request) Successful in 1m57s
Tests and linters / Run gofumpt (pull_request) Successful in 3m11s
Tests and linters / Lint (pull_request) Successful in 3m24s
Tests and linters / Staticcheck (pull_request) Successful in 3m39s
Tests and linters / Tests with -race (pull_request) Successful in 3m54s
Tests and linters / Tests (pull_request) Successful in 4m4s
Tests and linters / gopls check (pull_request) Successful in 4m23s
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
Date: 2025-03-04 19:43:10 +03:00
222 changed files with 4286 additions and 4353 deletions

.ci/Jenkinsfile (vendored), 87 lines changed

@ -1,87 +0,0 @@
def golang = ['1.23', '1.24']
def golangDefault = "golang:${golang.last()}"
async {
for (version in golang) {
def go = version
task("test/go${go}") {
container("golang:${go}") {
sh 'make test'
}
}
task("build/go${go}") {
container("golang:${go}") {
for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
sh """
make bin/frostfs-${app}
bin/frostfs-${app} --version
"""
}
}
}
}
task('test/race') {
container(golangDefault) {
sh 'make test GOFLAGS="-count=1 -race"'
}
}
task('lint') {
container(golangDefault) {
sh 'make lint-install lint'
}
}
task('staticcheck') {
container(golangDefault) {
sh 'make staticcheck-install staticcheck-run'
}
}
task('gopls') {
container(golangDefault) {
sh 'make gopls-install gopls-run'
}
}
task('gofumpt') {
container(golangDefault) {
sh '''
make fumpt-install
make fumpt
git diff --exit-code --quiet
'''
}
}
task('vulncheck') {
container(golangDefault) {
sh '''
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
'''
}
}
task('pre-commit') {
dockerfile("""
FROM ${golangDefault}
RUN apt update && \
apt install -y --no-install-recommends pre-commit
""") {
withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
sh 'pre-commit run --color=always --hook-stage=manual --all-files'
}
}
}
task('dco') {
container('git.frostfs.info/truecloudlab/commit-check:master') {
sh 'FROM=pull_request_target commit-check'
}
}
}

@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.23', '1.24' ]
go_versions: [ '1.22', '1.23' ]
steps:
- uses: actions/checkout@v3

@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.22'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

@ -21,7 +21,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.24
go-version: 1.23
- name: Set up Python
run: |
apt update

@ -16,7 +16,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install linters
@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.23', '1.24' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@ -53,7 +53,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.22'
cache: true
- name: Run tests
@ -68,7 +68,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install staticcheck
@ -104,7 +104,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install gofumpt

@ -18,7 +18,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
check-latest: true
- name: Install govulncheck

@ -22,11 +22,9 @@ linters-settings:
# 'default' case is present, even if all enum members aren't listed in the
# switch
default-signifies-exhaustive: true
gci:
sections:
- standard
- default
custom-order: true
govet:
# report about shadowed variables
check-shadowing: false
staticcheck:
checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
funlen:
@ -57,8 +55,8 @@ linters-settings:
linters:
enable:
# mandatory linters
- govet
- revive
- predeclared
# some default golangci-lint linters
- errcheck
@ -74,7 +72,6 @@ linters:
- durationcheck
- exhaustive
- copyloopvar
- gci
- gofmt
- goimports
- misspell
@ -91,8 +88,8 @@ linters:
- testifylint
- protogetter
- intrange
- tenv
- unconvert
- unparam
- usetesting
disable-all: true
fast: false

@ -1,6 +1,5 @@
#!/usr/bin/make -f
SHELL = bash
.SHELLFLAGS = -euo pipefail -c
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@ -8,16 +7,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.23
LINT_VERSION ?= 1.64.8
TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.2
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
PROTOC_OS_VERSION=linux-x86_64
endif
STATICCHECK_VERSION ?= 2025.1.1
STATICCHECK_VERSION ?= 2024.1.1
ARCH = amd64
BIN = bin
@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
GOPLS_VERSION ?= v0.17.1
GOPLS_VERSION ?= v0.15.1
GOPLS_DIR ?= $(abspath $(BIN))/gopls
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
GOPLS_TEMP_FILE := $(shell mktemp)
@ -116,7 +115,7 @@ protoc:
# Install protoc
protoc-install:
@rm -rf $(PROTOBUF_DIR)
@mkdir -p $(PROTOBUF_DIR)
@mkdir $(PROTOBUF_DIR)
@echo "⇒ Installing protoc... "
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@ -170,7 +169,7 @@ imports:
# Install gofumpt
fumpt-install:
@rm -rf $(GOFUMPT_DIR)
@mkdir -p $(GOFUMPT_DIR)
@mkdir $(GOFUMPT_DIR)
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
# Run gofumpt
@ -187,37 +186,14 @@ test:
@echo "⇒ Running go test"
@GOFLAGS="$(GOFLAGS)" go test ./...
# Install Gerrit commit-msg hook
review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
review-install:
@git config remote.review.url \
|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
@mkdir -p $(GIT_HOOK_DIR)/
@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
@chmod +x $(GIT_HOOK_DIR)/commit-msg
@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
# Create a PR in Gerrit
review: BRANCH ?= master
review:
@git push review HEAD:refs/for/$(BRANCH) \
--push-option r=e.stratonikov@yadro.com \
--push-option r=d.stepanov@yadro.com \
--push-option r=an.nikiforov@yadro.com \
--push-option r=a.arifullin@yadro.com \
--push-option r=ekaterina.lebedeva@yadro.com \
--push-option r=a.savchuk@yadro.com \
--push-option r=a.chuprov@yadro.com
# Run pre-commit
pre-commit-run:
@pre-commit run -a --hook-stage manual
# Install linters
lint-install: $(BIN)
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@ -236,7 +212,7 @@ lint:
# Install staticcheck
staticcheck-install:
@rm -rf $(STATICCHECK_DIR)
@mkdir -p $(STATICCHECK_DIR)
@mkdir $(STATICCHECK_DIR)
@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
# Run staticcheck
@ -249,7 +225,7 @@ staticcheck-run:
# Install gopls
gopls-install:
@rm -rf $(GOPLS_DIR)
@mkdir -p $(GOPLS_DIR)
@mkdir $(GOPLS_DIR)
@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
# Run gopls

@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
}
assert.NoError(w.Err)
if w.Err != nil {
panic(w.Err)
}
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
assert.NoError(w.Err)
if w.Err != nil {
panic(w.Err)
}
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {

@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
nbuf := make([]byte, 8)
copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
}

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
@ -236,7 +235,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
assert.NoError(bw.Err)
if bw.Err != nil {
panic(bw.Err)
}
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
assert.NoError(writer.Err, "can't create deployment script")
if writer.Err != nil {
panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
}
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
return err
@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
assert.NoError(bw.Err, "can't create deployment script")
if bw.Len() != start {
if bw.Err != nil {
panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
} else if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)

@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@ -220,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE()))
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()
@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
assert.NoError(sub.Err, "can't create version script")
if sub.Err != nil {
panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
}
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
bw.WriteBytes(script)
bw.BinWriter.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
assert.NoError(bw.Err, "can't create version script")
if bw.Err != nil {
panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
}
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {

@ -34,7 +34,7 @@ const (
subjectNameFlag = "subject-name"
subjectKeyFlag = "subject-key"
subjectAddressFlag = "subject-address"
extendedFlag = "extended"
includeNamesFlag = "include-names"
groupNameFlag = "group-name"
groupIDFlag = "group-id"
@ -209,7 +209,7 @@ func initFrostfsIDListSubjectsCmd() {
Cmd.AddCommand(frostfsidListSubjectsCmd)
frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
}
func initFrostfsIDCreateGroupCmd() {
@ -256,7 +256,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)")
frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
}
func initFrostfsIDSetKVCmd() {
@ -336,7 +336,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
}
func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
extended, _ := cmd.Flags().GetBool(extendedFlag)
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
ns := getFrostfsIDNamespace(cmd)
inv, _, hash := initInvoker(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
@ -349,19 +349,21 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
for _, addr := range subAddresses {
if !extended {
if !includeNames {
cmd.Println(address.Uint160ToString(addr))
continue
}
items, err := reader.GetSubject(addr)
sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
printSubjectInfo(cmd, addr, subj)
cmd.Println()
cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
}
}
@ -481,7 +483,7 @@ func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd)
extended, _ := cmd.Flags().GetBool(extendedFlag)
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
inv, cs, hash := initInvoker(cmd)
_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
@ -499,7 +501,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
for _, subjAddr := range subjects {
if !extended {
if !includeNames {
cmd.Println(address.Uint160ToString(subjAddr))
continue
}
@ -508,8 +510,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
printSubjectInfo(cmd, subjAddr, subj)
cmd.Println()
cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
}
}
@ -599,30 +600,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui
return inv, cs, nmHash
}
func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) {
cmd.Printf("Address: %s\n", address.Uint160ToString(addr))
pk := "<nil>"
if subj.PrimaryKey != nil {
pk = subj.PrimaryKey.String()
}
cmd.Printf("Primary key: %s\n", pk)
cmd.Printf("Name: %s\n", subj.Name)
cmd.Printf("Namespace: %s\n", subj.Namespace)
if len(subj.AdditionalKeys) > 0 {
cmd.Printf("Additional keys:\n")
for _, key := range subj.AdditionalKeys {
k := "<nil>"
if key != nil {
k = key.String()
}
cmd.Printf("- %s\n", k)
}
}
if len(subj.KV) > 0 {
cmd.Printf("KV:\n")
for k, v := range subj.KV {
cmd.Printf("- %s: %s\n", k, v)
}
}
}

@ -6,7 +6,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@ -14,7 +13,9 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
}
func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
inv := invoker.New(c, nil)
reader := nns2.NewReader(inv, nnsHash)
return reader.IsAvailable(name)
switch c.(type) {
case *rpcclient.Client:
inv := invoker.New(c, nil)
reader := nns2.NewReader(inv, nnsHash)
return reader.IsAvailable(name)
default:
b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
if err != nil {
return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
}
return b, nil
}
}
func CheckNotaryEnabled(c Client) error {

@ -13,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@ -22,7 +21,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
@ -30,6 +28,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@ -376,7 +375,9 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen
}
act, err = actor.New(c.Client, signers)
} else {
assert.False(withConsensus, "BUG: should never happen")
if withConsensus {
panic("BUG: should never happen")
}
act, err = c.CommitteeAct, nil
}
if err != nil {
@ -410,9 +411,11 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp
func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
version, err := c.Client.GetVersion()
// error appears only if client
// has not been initialized
assert.NoError(err)
if err != nil {
// error appears only if client
// has not been initialized
panic(err)
}
network := version.Protocol.Network
// Use parameter context to avoid dealing with signature order.
@ -444,12 +447,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin
for i := range tx.Signers {
if tx.Signers[i].Account == h {
assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
if i < len(tx.Scripts) {
tx.Scripts[i] = *w
}
if i == len(tx.Scripts) {
} else if i == len(tx.Scripts) {
tx.Scripts = append(tx.Scripts, *w)
} else {
panic("BUG: invalid signing order")
}
return nil
}
@ -507,7 +510,9 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
assert.NoError(bw.Err)
if bw.Err != nil {
panic(bw.Err)
}
return bw.Bytes(), false, nil
}
@ -519,8 +524,12 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
}
func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
return !avail, err
res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
if err != nil {
return false, err
}
return res.State == vmstate.Halt.String(), nil
}
func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/core"
@ -317,7 +316,9 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint
func (l *LocalClient) putTransactions() error {
// 1. Prepare new block.
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
assert.NoError(err)
if err != nil {
panic(err)
}
defer func() { l.transactions = l.transactions[:0] }()
b := &block.Block{
@ -358,7 +359,9 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s
w := io.NewBufBinWriter()
emit.Array(w.BinWriter, parameters...)
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
if w.Err != nil {
panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
}
return c.InvokeScript(w.Bytes(), signers)
}

@ -3,7 +3,6 @@ package helper
import (
"errors"
"fmt"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
return err
}
for k, v := range m {
if slices.Contains(NetmapConfigKeys, k) {
md[k] = v
for _, key := range NetmapConfigKeys {
if k == key {
md[k] = v
break
}
}
}
return nil

@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@ -112,7 +111,9 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b
emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
assert.NoError(w.Err, "can't wrap register script")
if w.Err != nil {
panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
}
}
func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {

@ -1,18 +1,21 @@
package initialize
import (
"errors"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -27,8 +30,7 @@ const (
)
func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
reader := neo.NewReader(c.ReadOnlyInvoker)
regPrice, err := reader.GetRegisterPrice()
regPrice, err := getCandidateRegisterPrice(c)
if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err)
}
@ -40,7 +42,9 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
emit.Opcodes(w.BinWriter, opcode.ASSERT)
}
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
assert.NoError(w.Err)
if w.Err != nil {
panic(fmt.Sprintf("BUG: %v", w.Err))
}
signers := []actor.SignerAccount{{
Signer: c.GetSigner(false, c.CommitteeAcc),
@ -112,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
neoHash := neo.Hash
ok, err := transferNEOFinished(c)
ok, err := transferNEOFinished(c, neoHash)
if ok || err != nil {
return err
}
@ -135,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
return c.AwaitTx()
}
func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
r := neo.NewReader(c.ReadOnlyInvoker)
func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
}
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
switch c.Client.(type) {
case *rpcclient.Client:
inv := invoker.New(c.Client, nil)
reader := neo.NewReader(inv)
return reader.GetRegisterPrice()
default:
neoHash := neo.Hash
res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
if err != nil {
return 0, err
}
if len(res.Stack) == 0 {
return 0, errGetPriceInvalid
}
bi, err := res.Stack[0].TryInteger()
if err != nil || !bi.IsInt64() {
return 0, errGetPriceInvalid
}
return bi.Int64(), nil
}
}

@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
_ = tw.Flush()
cmd.Print(buf.String())

@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@ -40,6 +41,7 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))

@ -0,0 +1,137 @@
package storagecfg
const configTemplate = `logger:
level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
node:
wallet:
path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
addresses: # list of addresses announced by Storage node in the Network map
- {{ .AnnouncedAddress }}
attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
grpc:
num: 1 # total number of listener endpoints
0:
endpoint: {{ .Endpoint }} # endpoint for gRPC server
tls:{{if .TLSCert}}
enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
certificate: {{ .TLSCert }} # path to TLS certificate
key: {{ .TLSKey }} # path to TLS key
{{- else }}
enabled: false # disable TLS for a gRPC connection
{{- end}}
control:
authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
{{- range .AuthorizedKeys }}
- {{.}}{{end}}
grpc:
endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
morph:
dial_timeout: 20s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # use TTL cache for side chain GET operations
rpc_endpoint: # side chain N3 RPC endpoints
{{- range .MorphRPC }}
- address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard:
default: # section with the default shard parameters
metabase:
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
blobstor:
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 2 # max depth of object tree storage in FS
small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
compress: true # turn on/off Zstandard compression (level 3) of stored objects
compression_exclude_content_types:
- audio/*
- video/*
blobovnicza:
size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
gc:
remover_batch_size: 200 # number of objects to be removed by the garbage collector
remover_sleep_interval: 5m # frequency of the garbage collector invocation
0:
mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
metabase:
path: {{ .MetabasePath }} # path to the metabase
blobstor:
path: {{ .BlobstorPath }} # path to the blobstor
{{end}}`
const (
neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
)
var n3config = map[string]struct {
MorphRPC []string
RPC []string
NeoFSContract string
BalanceContract string
}{
"testnet": {
MorphRPC: []string{
"rpc01.morph.testnet.fs.neo.org:51331",
"rpc02.morph.testnet.fs.neo.org:51331",
"rpc03.morph.testnet.fs.neo.org:51331",
"rpc04.morph.testnet.fs.neo.org:51331",
"rpc05.morph.testnet.fs.neo.org:51331",
"rpc06.morph.testnet.fs.neo.org:51331",
"rpc07.morph.testnet.fs.neo.org:51331",
},
RPC: []string{
"rpc01.testnet.n3.nspcc.ru:21331",
"rpc02.testnet.n3.nspcc.ru:21331",
"rpc03.testnet.n3.nspcc.ru:21331",
"rpc04.testnet.n3.nspcc.ru:21331",
"rpc05.testnet.n3.nspcc.ru:21331",
"rpc06.testnet.n3.nspcc.ru:21331",
"rpc07.testnet.n3.nspcc.ru:21331",
},
NeoFSContract: neofsTestnetAddress,
BalanceContract: balanceTestnetAddress,
},
"mainnet": {
MorphRPC: []string{
"rpc1.morph.fs.neo.org:40341",
"rpc2.morph.fs.neo.org:40341",
"rpc3.morph.fs.neo.org:40341",
"rpc4.morph.fs.neo.org:40341",
"rpc5.morph.fs.neo.org:40341",
"rpc6.morph.fs.neo.org:40341",
"rpc7.morph.fs.neo.org:40341",
},
RPC: []string{
"rpc1.n3.nspcc.ru:10331",
"rpc2.n3.nspcc.ru:10331",
"rpc3.n3.nspcc.ru:10331",
"rpc4.n3.nspcc.ru:10331",
"rpc5.n3.nspcc.ru:10331",
"rpc6.n3.nspcc.ru:10331",
"rpc7.n3.nspcc.ru:10331",
},
NeoFSContract: neofsMainnetAddress,
BalanceContract: balanceMainnetAddress,
},
}

@ -0,0 +1,433 @@
package storagecfg
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
"time"
netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"github.com/chzyer/readline"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
)
const (
walletFlag = "wallet"
accountFlag = "account"
)
const (
defaultControlEndpoint = "localhost:8090"
defaultDataEndpoint = "localhost"
)
// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
Use: "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
Short: "Section for storage node configuration commands",
Run: storageConfig,
}
func init() {
fs := RootCmd.Flags()
fs.StringP(walletFlag, "w", "", "Path to wallet")
fs.StringP(accountFlag, "a", "", "Wallet account")
}
type config struct {
AnnouncedAddress string
AuthorizedKeys []string
ControlEndpoint string
Endpoint string
TLSCert string
TLSKey string
MorphRPC []string
Attribute struct {
Locode string
}
Wallet struct {
Path string
Account string
Password string
}
Relay bool
BlobstorPath string
MetabasePath string
}
func storageConfig(cmd *cobra.Command, args []string) {
outPath := getOutputPath(args)
historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
readline.SetHistoryPath(historyPath)
var c config
c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
if c.Wallet.Path == "" {
c.Wallet.Path = getPath("Path to the storage node wallet: ")
}
w, err := wallet.NewWalletFromFile(c.Wallet.Path)
fatalOnErr(err)
fillWalletAccount(cmd, &c, w)
accH, err := flags.ParseAddress(c.Wallet.Account)
fatalOnErr(err)
acc := w.GetAccount(accH)
if acc == nil {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
fatalOnErr(err)
c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
network := readNetwork(cmd)
c.MorphRPC = n3config[network].MorphRPC
depositGas(cmd, acc, network)
c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
endpoint := getDefaultEndpoint(cmd, &c)
c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
if c.Endpoint == "" {
c.Endpoint = endpoint
}
c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
if c.ControlEndpoint == "" {
c.ControlEndpoint = defaultControlEndpoint
}
c.TLSCert = getPath("TLS Certificate (optional): ")
if c.TLSCert != "" {
c.TLSKey = getPath("TLS Key: ")
}
c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
if !c.Relay {
p := getPath("Path to the storage directory (all available storage will be used): ")
c.BlobstorPath = filepath.Join(p, "blob")
c.MetabasePath = filepath.Join(p, "meta")
}
out := applyTemplate(c)
fatalOnErr(os.WriteFile(outPath, out, 0o644))
cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}
func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
var addr, port string
for {
c.AnnouncedAddress = getString("Publicly announced address: ")
validator := netutil.Address{}
err := validator.FromString(c.AnnouncedAddress)
if err != nil {
cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
continue
}
uriAddr, err := url.Parse(validator.URIAddr())
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
addr = uriAddr.Hostname()
port = uriAddr.Port()
ip, err := net.ResolveIPAddr("ip", addr)
if err != nil {
cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
continue
}
if !ip.IP.IsGlobalUnicast() {
cmd.Println("IP must be global unicast.")
continue
}
cmd.Printf("Resolved IP address: %s\n", ip.String())
_, err = strconv.ParseUint(port, 10, 16)
if err != nil {
cmd.Println("Port must be an integer.")
continue
}
break
}
return net.JoinHostPort(defaultDataEndpoint, port)
}
func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
if c.Wallet.Account == "" {
addr := address.Uint160ToString(w.GetChangeAddress())
c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
if c.Wallet.Account == "" {
c.Wallet.Account = addr
}
}
}
func readNetwork(cmd *cobra.Command) string {
var network string
for {
network = getString("Choose network [mainnet]/testnet: ")
switch network {
case "":
network = "mainnet"
case "testnet", "mainnet":
default:
cmd.Println(`Network must be either "mainnet" or "testnet"`)
continue
}
break
}
return network
}
func getOutputPath(args []string) string {
if len(args) != 0 {
return args[0]
}
outPath := getPath("File to write config at [./config.yml]: ")
if outPath == "" {
outPath = "./config.yml"
}
return outPath
}
func getWalletAccount(w *wallet.Wallet, prompt string) string {
addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
for i := range w.Accounts {
addrs[i] = readline.PcItem(w.Accounts[i].Address)
}
readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
defer readline.SetAutoComplete(nil)
s, err := readline.Line(prompt)
fatalOnErr(err)
return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
}
func getString(prompt string) string {
s, err := readline.Line(prompt)
fatalOnErr(err)
if s != "" {
_ = readline.AddHistory(s)
}
return s
}
type filenameCompleter struct{}
func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
prefix := string(line[:pos])
dir := filepath.Dir(prefix)
de, err := os.ReadDir(dir)
if err != nil {
return nil, 0
}
for i := range de {
name := filepath.Join(dir, de[i].Name())
if strings.HasPrefix(name, prefix) {
tail := []rune(strings.TrimPrefix(name, prefix))
if de[i].IsDir() {
tail = append(tail, filepath.Separator)
}
newLine = append(newLine, tail)
}
}
if pos != 0 {
return newLine, pos - len([]rune(dir))
}
return newLine, 0
}
func getPath(prompt string) string {
readline.SetAutoComplete(filenameCompleter{})
defer readline.SetAutoComplete(nil)
p, err := readline.Line(prompt)
fatalOnErr(err)
if p == "" {
return p
}
_ = readline.AddHistory(p)
abs, err := filepath.Abs(p)
if err != nil {
fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
}
return abs
}
func getConfirmation(def bool, prompt string) bool {
for {
s, err := readline.Line(prompt)
fatalOnErr(err)
switch strings.ToLower(s) {
case "y", "yes":
return true
case "n", "no":
return false
default:
if len(s) == 0 {
return def
}
}
}
}
func applyTemplate(c config) []byte {
tmpl, err := template.New("config").Parse(configTemplate)
fatalOnErr(err)
b := bytes.NewBuffer(nil)
fatalOnErr(tmpl.Execute(b, c))
return b.Bytes()
}
func fatalOnErr(err error) {
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}
func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
sideClient := initClient(n3config[network].MorphRPC)
balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
sideActor, err := actor.NewSimple(sideClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
}
sideGas := nep17.NewReader(sideActor, balanceHash)
accSH := acc.Contract.ScriptHash()
balance, err := sideGas.BalanceOf(accSH)
if err != nil {
fatalOnErr(fmt.Errorf("side chain balance: %w", err))
}
ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
fixedn.ToString(balance, 12)))
if !ok {
return
}
amountStr := getString("Enter amount in GAS: ")
amount, err := fixedn.FromString(amountStr, 8)
if err != nil {
fatalOnErr(fmt.Errorf("invalid amount: %w", err))
}
mainClient := initClient(n3config[network].RPC)
neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
mainActor, err := actor.NewSimple(mainClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
}
mainGas := nep17.New(mainActor, gas.Hash)
txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
if err != nil {
fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
}
cmd.Print("Waiting for transactions to persist.")
tick := time.NewTicker(time.Second / 2)
defer tick.Stop()
timer := time.NewTimer(time.Second * 20)
defer timer.Stop()
at := trigger.Application
loop:
for {
select {
case <-tick.C:
_, err := mainClient.GetApplicationLog(txHash, &at)
if err == nil {
cmd.Print("\n")
break loop
}
cmd.Print(".")
case <-timer.C:
cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
if getConfirmation(false, "Continue configuration? yes/[no]: ") {
return
}
os.Exit(1)
}
}
}
func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {
c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
DialTimeout: time.Second * 2,
RequestTimeout: time.Second * 5,
})
if err != nil {
continue
}
if err = c.Init(); err != nil {
continue
}
return c
}
fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
panic("unreachable")
}

@ -858,8 +858,6 @@ type PatchObjectPrm struct {
ReplaceAttribute bool
NewSplitHeader *objectSDK.SplitHeader
PayloadPatches []PayloadPatch
}
@ -890,11 +888,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err)
}
if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
NewSplitHeader: prm.NewSplitHeader,
NewAttributes: prm.NewAttributes,
ReplaceAttributes: prm.ReplaceAttribute,
}) {
if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil {

@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
prmDial := client.PrmDial{
Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
},

@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
leftMinutes := int(leftSeconds / 60)
fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes)
sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
}
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
hour := int(duration.Seconds() / 3600)
minute := int(duration.Seconds()/60) % 60
second := int(duration.Seconds()) % 60
fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second)
sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
}
}
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStartedAt() != nil {
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339))
sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
}
}
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if len(resp.GetBody().GetErrorMessage()) > 0 {
fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage())
sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
}
}
@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
default:
status = "undefined"
}
fmt.Fprintf(sb, " Status: %s.", status)
sb.WriteString(fmt.Sprintf(" Status: %s.", status))
}
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
}
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
resp.GetBody().GetEvacuatedObjects(),
resp.GetBody().GetTotalObjects(),
resp.GetBody().GetFailedObjects(),
resp.GetBody().GetSkippedObjects(),
resp.GetBody().GetEvacuatedTrees(),
resp.GetBody().GetTotalTrees(),
resp.GetBody().GetFailedTrees())
resp.GetBody().GetFailedTrees()))
}
func initControlEvacuationShardCmd() {

@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write([]byte("#\tName\tType\n"))
for i, t := range targets {
_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
}
_ = tw.Flush()
cmd.Print(buf.String())

@ -1,117 +0,0 @@
package control
import (
"bytes"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
const (
FullInfoFlag = "full"
FullInfoFlagUsage = "Print full ShardInfo."
)
var locateObjectCmd = &cobra.Command{
Use: "locate-object",
Short: "List shards storing the object",
Long: "List shards storing the object",
Run: locateObject,
}
func initControlLocateObjectCmd() {
initControlFlags(locateObjectCmd)
flags := locateObjectCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
}
func locateObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
_ = object.ReadObjectAddress(cmd, &cnr, &obj)
pk := key.Get(cmd)
body := new(control.ListShardsForObjectRequest_Body)
body.SetContainerId(cnr.EncodeToString())
body.SetObjectId(obj.EncodeToString())
req := new(control.ListShardsForObjectRequest)
req.SetBody(body)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var err error
var resp *control.ListShardsForObjectResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.ListShardsForObject(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
shardIDs := resp.GetBody().GetShard_ID()
isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
if !isFull {
for _, id := range shardIDs {
cmd.Println(base58.Encode(id))
}
return
}
// get full shard info
listShardsReq := new(control.ListShardsRequest)
listShardsReq.SetBody(new(control.ListShardsRequest_Body))
signRequest(cmd, pk, listShardsReq)
var listShardsResp *control.ListShardsResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
listShardsResp, err = control.ListShards(client, listShardsReq)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
shards := listShardsResp.GetBody().GetShards()
sortShardsByID(shards)
shards = filterShards(shards, shardIDs)
isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
if isJSON {
prettyPrintShardsJSON(cmd, shards)
} else {
prettyPrintShards(cmd, shards)
}
}
func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
var res []control.ShardInfo
for _, id := range ids {
for _, inf := range info {
if bytes.Equal(inf.Shard_ID, id) {
res = append(res, inf)
}
}
}
return res
}

@ -39,7 +39,6 @@ func init() {
listRulesCmd,
getRuleCmd,
listTargetsCmd,
locateObjectCmd,
)
initControlHealthCheckCmd()
@ -53,5 +52,4 @@ func init() {
initControlListRulesCmd()
initControGetRuleCmd()
initControlListTargetsCmd()
initControlLocateObjectCmd()
}

@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{
var sealWritecacheShardCmd = &cobra.Command{
Use: "seal",
Short: "Flush objects from write-cache and move write-cache to degraded read only mode.",
Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.",
Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.",
Run: sealWritecache,
}

@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
objAddr = ReadObjectAddress(cmd, &cnr, &obj)
objAddr = readObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)

@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename)

@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)

@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)

@ -48,12 +48,6 @@ type ecHeader struct {
parent oid.ID
}
type objectCounter struct {
sync.Mutex
total uint32
isECcounted bool
}
type objectPlacement struct {
requiredNodes []netmapSDK.NodeInfo
confirmedNodes []netmapSDK.NodeInfo
@ -62,7 +56,6 @@ type objectPlacement struct {
type objectNodesResult struct {
errors []error
placements map[oid.ID]objectPlacement
total uint32
}
type ObjNodesDataObject struct {
@ -108,23 +101,23 @@ func initObjectNodesCmd() {
func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID
var objID oid.ID
ReadObjectAddress(cmd, &cnrID, &objID)
readObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk)
objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
getActualPlacement(cmd, netmap, pk, objects, count, result)
getActualPlacement(cmd, netmap, pk, objects, result)
printPlacement(cmd, objID, objects, result)
}
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) {
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@ -152,7 +145,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
parent: res.Header().ECHeader().Parent(),
}
}
return []phyObject{obj}, 1
return []phyObject{obj}
}
var errSplitInfo *objectSDK.SplitInfoError
@ -162,34 +155,29 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1
return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
}
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
return nil, 0
return nil
}
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) {
members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
}
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) {
var total int
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
if total = len(members); total > 0 {
total-- // linking object is not data object
}
return members, total
return members
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
return members, len(members)
return members
}
members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
return members, len(members)
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
}
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
@ -395,11 +383,8 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
}
}
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) {
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
resultMtx := &sync.Mutex{}
counter := &objectCounter{
total: uint32(count),
}
candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
@ -416,7 +401,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
for _, object := range objects {
eg.Go(func() error {
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter)
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
resultMtx.Lock()
defer resultMtx.Unlock()
if err == nil && stored {
@ -435,7 +420,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
result.total = counter.total
}
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
@ -494,7 +478,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
return cli, nil
}
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@ -509,14 +493,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
res, err := internalclient.HeadObject(ctx, prmHead)
if err == nil && res != nil {
if res.Header().ECHeader() != nil {
counter.Lock()
defer counter.Unlock()
if !counter.isECcounted {
counter.total *= res.Header().ECHeader().Total()
}
counter.isECcounted = true
}
return true, nil
}
var notFound *apistatus.ObjectNotFound
@ -536,8 +512,7 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
for _, object := range objects {
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
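For reference, the fan-out used by getActualPlacement above reduces to an errgroup plus a mutex-guarded result set. A minimal, self-contained sketch of that pattern — illustrative names only, a stub checkNode in place of the real HEAD request, and Go 1.22+ per-iteration loop variables assumed:

// Sketch of the errgroup fan-out pattern; not the actual frostfs-node API.
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// checkNode stands in for a HEAD request against a storage node (illustrative).
func checkNode(ctx context.Context, objectID string) (bool, error) {
	return true, nil
}

func main() {
	objects := []string{"obj-1", "obj-2", "obj-3"}

	var mtx sync.Mutex
	stored := make(map[string]bool, len(objects))

	eg, egCtx := errgroup.WithContext(context.Background())
	for _, id := range objects { // per-iteration variable (Go 1.22+)
		eg.Go(func() error {
			ok, err := checkNode(egCtx, id)
			mtx.Lock()
			defer mtx.Unlock()
			if err == nil && ok {
				stored[id] = true
			}
			// the real code records errors in the result instead of failing the group
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("placement check failed:", err)
		return
	}
	fmt.Printf("%d of %d objects confirmed\n", len(stored), len(objects))
}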

View file

@ -2,7 +2,6 @@ package object
import (
"fmt"
"os"
"strconv"
"strings"
@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -22,7 +20,6 @@ const (
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
splitHeaderFlagName = "split-header"
)
var objectPatchCmd = &cobra.Command{
@ -53,14 +50,13 @@ func initObjectPatchCmd() {
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
}
func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs
prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],
@ -153,22 +147,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}
func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
path, _ := cmd.Flags().GetString(splitHeaderFlagName)
if path == "" {
return nil
}
data, err := os.ReadFile(path)
commonCmd.ExitOnErr(cmd, "read file error: %w", err)
splitHdrV2 := new(objectV2.SplitHeader)
err = splitHdrV2.Unmarshal(data)
if err != nil {
err = splitHdrV2.UnmarshalJSON(data)
commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
}
return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
}

View file

@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)

View file

@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs
}
func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)

View file

@ -2,19 +2,17 @@ package tree
import (
"context"
"crypto/tls"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@ -33,29 +31,21 @@ func _client() (tree.TreeServiceClient, error) {
return nil, err
}
host, isTLS, err := client.ParseURI(netAddr.URIAddr())
if err != nil {
return nil, err
}
creds := insecure.NewCredentials()
if isTLS {
creds = credentials.NewTLS(&tls.Config{})
}
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
tracing.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
tracing.NewStreamClientInterceptor(),
),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
grpc.WithTransportCredentials(creds),
}
cc, err := grpc.NewClient(host, opts...)
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err
}
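The reworked tree client above chooses transport credentials from the endpoint scheme before dialing. A minimal sketch of that idea — the endpoint handling is illustrative (the real code parses the address with client.ParseURI rather than string trimming), and grpc.NewClient assumes a recent grpc-go release:

// Sketch: pick TLS vs. insecure credentials from the URI scheme.
package main

import (
	"crypto/tls"
	"fmt"
	"strings"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

func dial(endpoint string) (*grpc.ClientConn, error) {
	creds := insecure.NewCredentials()
	if strings.HasPrefix(endpoint, "grpcs://") {
		creds = credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12})
	}
	target := strings.TrimPrefix(strings.TrimPrefix(endpoint, "grpcs://"), "grpc://")
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(creds),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
}

func main() {
	cc, err := dial("grpc://localhost:8080") // connection is established lazily, on first RPC
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer cc.Close()
	fmt.Println("client created")
}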

View file

@ -9,7 +9,6 @@ import (
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@ -39,14 +38,13 @@ func reloadConfig() error {
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled"))
var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
log.Reload(logPrm)
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
return nil
return logPrm.Reload()
}
func watchForSignal(ctx context.Context, cancel func()) {

View file

@ -31,6 +31,7 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent
@ -69,7 +70,6 @@ func main() {
metrics := irMetrics.NewInnerRingMetrics()
var logPrm logger.Prm
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)

View file

@ -2,17 +2,13 @@ package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@ -31,11 +27,6 @@ Available search filters:
var initialPrompt string
var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
2: schema.MetabaseParserV2,
3: schema.MetabaseParserV3,
}
func init() {
common.AddComponentPathFlag(tuiCMD, &vPath)
@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
}
defer db.Close()
schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
if !hasVersion {
return errors.New("couldn't detect schema version")
}
metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
if !ok {
return fmt.Errorf("unknown schema version %d", schemaVersion)
}
// Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
app := tview.NewApplication()
ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
var (
shardInfoBucket = []byte{5}
versionRecord = []byte("version")
)
func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
err := db.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(shardInfoBucket)
if bkt == nil {
return nil
}
rec := bkt.Get(versionRecord)
if rec == nil {
return nil
}
version = binary.LittleEndian.Uint64(rec)
ok = true
return nil
})
if err != nil {
common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
}
return
}

View file

@ -3,8 +3,6 @@ package common
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
type FilterResult byte
@ -73,7 +71,11 @@ func (fp FallbackParser) ToParser() Parser {
func (p Parser) ToFallbackParser() FallbackParser {
return func(key, value []byte) (SchemaEntry, Parser) {
entry, next, err := p(key, value)
assert.NoError(err, "couldn't use that parser as a fallback parser")
if err != nil {
panic(fmt.Errorf(
"couldn't use that parser as a fallback parser, it returned an error: %w", err,
))
}
return entry, next
}
}

View file

@ -80,15 +80,10 @@ var (
},
)
UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
UserAttributeParser = NewUserAttributeKeyBucketParser(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
)
UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
)
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: StrictResolver,
@ -113,14 +108,4 @@ var (
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
cidResolver: LenientResolver,
oidResolver: LenientResolver,
})
ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
)

View file

@ -22,31 +22,27 @@ const (
Split
ContainerCounters
ECInfo
ExpirationEpochToObject
ObjectToExpirationEpoch
)
var x = map[Prefix]string{
Graveyard: "Graveyard",
Garbage: "Garbage",
ToMoveIt: "To Move It",
ContainerVolume: "Container Volume",
Locked: "Locked",
ShardInfo: "Shard Info",
Primary: "Primary",
Lockers: "Lockers",
Tombstone: "Tombstone",
Small: "Small",
Root: "Root",
Owner: "Owner",
UserAttribute: "User Attribute",
PayloadHash: "Payload Hash",
Parent: "Parent",
Split: "Split",
ContainerCounters: "Container Counters",
ECInfo: "EC Info",
ExpirationEpochToObject: "Exp. Epoch to Object",
ObjectToExpirationEpoch: "Object to Exp. Epoch",
Graveyard: "Graveyard",
Garbage: "Garbage",
ToMoveIt: "To Move It",
ContainerVolume: "Container Volume",
Locked: "Locked",
ShardInfo: "Shard Info",
Primary: "Primary",
Lockers: "Lockers",
Tombstone: "Tombstone",
Small: "Small",
Root: "Root",
Owner: "Owner",
UserAttribute: "User Attribute",
PayloadHash: "Payload Hash",
Parent: "Parent",
Split: "Split",
ContainerCounters: "Container Counters",
ECInfo: "EC Info",
}
func (p Prefix) String() string {

View file

@ -9,7 +9,7 @@ import (
func (b *PrefixBucket) String() string {
return common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
)
}
@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
return fmt.Sprintf(
"%s CID %s",
common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(b.id.String(), tcell.ColorAqua),
)
@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
func (b *UserAttributeKeyBucket) String() string {
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,

View file

@ -2,7 +2,6 @@ package buckets
import (
"errors"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -58,11 +57,10 @@ var (
)
var (
ErrNotBucket = errors.New("not a bucket")
ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix")
ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
ErrNotBucket = errors.New("not a bucket")
ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix")
)
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
}
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
}
func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil {
return nil, nil, ErrNotBucket
@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
return nil, nil, err
}
b.key = string(key[33:])
if len(keys) != 0 && !slices.Contains(keys, b.key) {
return nil, nil, ErrUnexpectedAttributeKey
}
return &b, next, nil
}
}

View file

@ -5,30 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
)
var MetabaseParserV3 = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
buckets.ContainerVolumeParser,
buckets.LockedParser,
buckets.ShardInfoParser,
buckets.PrimaryParser,
buckets.LockersParser,
buckets.TombstoneParser,
buckets.SmallParser,
buckets.RootParser,
buckets.UserAttributeParserV3,
buckets.ParentParser,
buckets.SplitParser,
buckets.ContainerCountersParser,
buckets.ECInfoParser,
buckets.ExpirationEpochToObjectParser,
buckets.ObjectToExpirationEpochParser,
),
common.RawParser.ToFallbackParser(),
)
var MetabaseParserV2 = common.WithFallback(
var MetabaseParser = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
buckets.SmallParser,
buckets.RootParser,
buckets.OwnerParser,
buckets.UserAttributeParserV2,
buckets.UserAttributeParser,
buckets.PayloadHashParser,
buckets.ParentParser,
buckets.SplitParser,

View file

@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ExpirationEpochToObjectRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ObjectToExpirationEpochRecord) DetailedString() string {
return spew.Sdump(*r)
}

View file

@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No
}
}
func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "cid":
id := val.(cid.ID)
return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}
func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}

View file

@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
}
return &r, nil, nil
}
func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 72 {
return nil, nil, ErrInvalidKeyLength
}
var (
r ExpirationEpochToObjectRecord
err error
)
r.epoch = binary.BigEndian.Uint64(key[:8])
if err = r.cnt.Decode(key[8:40]); err != nil {
return nil, nil, err
}
if err = r.obj.Decode(key[40:]); err != nil {
return nil, nil, err
}
return &r, nil, nil
}
func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 32 {
return nil, nil, ErrInvalidKeyLength
}
if len(value) != 8 {
return nil, nil, ErrInvalidValueLength
}
var (
r ObjectToExpirationEpochRecord
err error
)
if err = r.obj.Decode(key); err != nil {
return nil, nil, err
}
r.epoch = binary.LittleEndian.Uint64(value)
return &r, nil, nil
}

View file

@ -2,7 +2,6 @@ package records
import (
"fmt"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"github.com/gdamore/tcell/v2"
@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
len(r.ids),
)
}
func (r *ExpirationEpochToObjectRecord) String() string {
return fmt.Sprintf(
"exp. epoch %s %c CID %s OID %s",
common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
)
}
func (r *ObjectToExpirationEpochRecord) String() string {
return fmt.Sprintf(
"OID %s %c exp. epoch %s",
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
)
}

View file

@ -79,15 +79,4 @@ type (
id oid.ID
ids []oid.ID
}
ExpirationEpochToObjectRecord struct {
epoch uint64
cnt cid.ID
obj oid.ID
}
ObjectToExpirationEpochRecord struct {
obj oid.ID
epoch uint64
}
)

View file

@ -1,8 +1,6 @@
package tui
import (
"slices"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
// Used history data for search prompt, so just make that data recent.
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
f.history = append(f.history, s)
}
@ -53,17 +51,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
f.historyPointer++
// Stop iterating over history.
if f.historyPointer == len(f.history) {
f.SetText(f.currentContent)
f.InputField.SetText(f.currentContent)
return
}
f.SetText(f.history[f.historyPointer])
f.InputField.SetText(f.history[f.historyPointer])
case tcell.KeyUp:
if len(f.history) == 0 {
return
}
// Start iterating over history.
if f.historyPointer == len(f.history) {
f.currentContent = f.GetText()
f.currentContent = f.InputField.GetText()
}
// End of history.
if f.historyPointer == 0 {
@ -71,7 +69,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
}
// Iterate to least recent prompts.
f.historyPointer--
f.SetText(f.history[f.historyPointer])
f.InputField.SetText(f.history[f.historyPointer])
default:
f.InputField.InputHandler()(event, func(tview.Primitive) {})
}
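The history navigation above is easier to follow stripped of tview: KeyUp walks toward older prompts, KeyDown walks back toward newer ones and finally restores the text that was being typed. A reduced sketch with illustrative names:

// Sketch of the prompt-history model behind InputFieldWithHistory.
package main

import "fmt"

type history struct {
	entries []string
	pointer int    // == len(entries) means "not browsing history"
	current string // text typed before browsing started
}

func (h *history) up(text string) string {
	if len(h.entries) == 0 {
		return text
	}
	if h.pointer == len(h.entries) {
		h.current = text // remember the in-progress input
	}
	if h.pointer > 0 {
		h.pointer--
	}
	return h.entries[h.pointer]
}

func (h *history) down() string {
	if h.pointer == len(h.entries) {
		return h.current
	}
	h.pointer++
	if h.pointer == len(h.entries) {
		return h.current // back to the in-progress input
	}
	return h.entries[h.pointer]
}

func main() {
	h := &history{entries: []string{"cid:abc", "oid:def"}, pointer: 2}
	fmt.Println(h.up("oid:123")) // oid:def
	fmt.Println(h.up(""))        // cid:abc
	fmt.Println(h.down())        // oid:def
	fmt.Println(h.down())        // oid:123 (restored current input)
}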

View file

@ -8,7 +8,6 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@ -95,7 +94,9 @@ func (v *RecordsView) Mount(ctx context.Context) error {
}
func (v *RecordsView) Unmount() {
assert.False(v.onUnmount == nil, "try to unmount not mounted component")
if v.onUnmount == nil {
panic("try to unmount not mounted component")
}
v.onUnmount()
v.onUnmount = nil
}

View file

@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
}
ui.MouseHandler()
ui.Box.MouseHandler()
}
func (ui *UI) WithPrompt(prompt string) error {

View file

@ -14,7 +14,7 @@ import (
func initAPEManagerService(c *cfg) {
contractStorage := ape_contract.NewProxyVerificationContractStorage(
morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
c.key,
c.shared.key,
c.cfgMorph.proxyScriptHash,
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)

View file

@ -1,27 +1,20 @@
package main
import (
"bytes"
"cmp"
"context"
"slices"
"sync"
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.uber.org/zap"
)
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
@ -117,6 +110,55 @@ func (c *ttlNetCache[K, V]) remove(key K) {
hit = c.cache.Remove(key)
}
// entity that provides LRU cache interface.
type lruNetCache struct {
cache *lru.Cache[uint64, *netmapSDK.NetMap]
netRdr netValueReader[uint64, *netmapSDK.NetMap]
metrics cacheMetrics
}
// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
fatalOnErr(err)
return &lruNetCache{
cache: cache,
netRdr: netRdr,
metrics: metrics,
}
}
// reads value by the key.
//
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
}()
val, ok := c.cache.Get(key)
if ok {
hit = true
return val, nil
}
val, err := c.netRdr(ctx, key)
if err != nil {
return nil, err
}
c.cache.Add(key, val)
return val, nil
}
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage struct {
@ -158,236 +200,20 @@ func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*con
type lruNetmapSource struct {
netState netmap.State
client rawSource
cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
mtx sync.RWMutex
metrics cacheMetrics
log *logger.Logger
candidates atomic.Pointer[[]netmapSDK.NodeInfo]
cache *lruNetCache
}
type rawSource interface {
GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
}
func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
) netmap.Source {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
fatalOnErr(err)
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(ctx, key)
}, metrics.NewCacheMetrics("netmap"))
src := &lruNetmapSource{
netState: netState,
client: client,
cache: cache,
log: log,
metrics: metrics.NewCacheMetrics("netmap"),
return &lruNetmapSource{
netState: s,
cache: lruNetmapCache,
}
wg.Add(1)
go func() {
defer wg.Done()
src.updateCandidates(ctx, d)
}()
return src
}
// updateCandidates routine to merge netmap in cache with candidates list.
func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
timer := time.NewTimer(d)
defer timer.Stop()
for {
select {
case <-ctx.Done():
return
case <-timer.C:
newCandidates, err := s.client.GetCandidates(ctx)
if err != nil {
s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
timer.Reset(d)
break
}
if len(newCandidates) == 0 {
s.candidates.Store(&newCandidates)
timer.Reset(d)
break
}
slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
return cmp.Compare(n1.Hash(), n2.Hash())
})
// Check once state changed
v := s.candidates.Load()
if v == nil {
s.candidates.Store(&newCandidates)
s.mergeCacheWithCandidates(newCandidates)
timer.Reset(d)
break
}
ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
uint32(n1.Status()) != uint32(n2.Status()) ||
slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
return 1
}
var ne1 []string
n1.IterateNetworkEndpoints(func(s string) bool {
ne1 = append(ne1, s)
return false
})
var ne2 []string
n2.IterateNetworkEndpoints(func(s string) bool {
ne2 = append(ne2, s)
return false
})
return slices.Compare(ne1, ne2)
})
if ret != 0 {
s.candidates.Store(&newCandidates)
s.mergeCacheWithCandidates(newCandidates)
}
timer.Reset(d)
}
}
}
func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
s.mtx.Lock()
tmp := s.cache.Values()
s.mtx.Unlock()
for _, pointer := range tmp {
nm := pointer.Load()
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
nm = nm.Clone()
mergeNetmapWithCandidates(updates, nm)
pointer.Store(nm)
}
}
}
// reads value by the key.
//
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
}()
s.mtx.RLock()
val, ok := s.cache.Get(key)
s.mtx.RUnlock()
if ok {
hit = true
return val.Load(), nil
}
s.mtx.Lock()
defer s.mtx.Unlock()
val, ok = s.cache.Get(key)
if ok {
hit = true
return val.Load(), nil
}
nm, err := s.client.GetNetMapByEpoch(ctx, key)
if err != nil {
return nil, err
}
v := s.candidates.Load()
if v != nil {
updates := getNetMapNodesToUpdate(nm, *v)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
}
p := atomic.Pointer[netmapSDK.NetMap]{}
p.Store(nm)
s.cache.Add(key, &p)
return nm, nil
}
// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
for _, v := range updates {
if v.status != netmapSDK.UnspecifiedState {
nm.Nodes()[v.netmapIndex].SetStatus(v.status)
}
if v.externalAddresses != nil {
nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
}
if v.endpoints != nil {
nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
}
}
}
type nodeToUpdate struct {
netmapIndex int
status netmapSDK.NodeState
externalAddresses []string
endpoints []string
}
// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
var res []nodeToUpdate
for i := range nm.Nodes() {
for _, cnd := range candidates {
if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
var tmp nodeToUpdate
var update bool
if cnd.Status() != nm.Nodes()[i].Status() &&
(cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
update = true
tmp.status = cnd.Status()
}
externalAddresses := cnd.ExternalAddresses()
if externalAddresses != nil &&
slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
update = true
tmp.externalAddresses = externalAddresses
}
nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
nm.Nodes()[i].IterateNetworkEndpoints(func(s string) bool {
nodeEndpoints = append(nodeEndpoints, s)
return false
})
candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
cnd.IterateNetworkEndpoints(func(s string) bool {
candidateEndpoints = append(candidateEndpoints, s)
return false
})
if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
update = true
tmp.endpoints = candidateEndpoints
}
if update {
tmp.netmapIndex = i
res = append(res, tmp)
}
break
}
}
}
return res
}
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
@ -399,7 +225,7 @@ func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*
}
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.get(ctx, epoch)
val, err := s.cache.get(ctx, epoch)
if err != nil {
return nil, err
}
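The lruNetCache added above is a read-through cache: look the epoch up, fall back to the network reader on a miss, store the result. A generic sketch of the same shape — names are illustrative, the fetch callback stands in for GetNetMapByEpoch, and the metrics hook is omitted:

// Sketch of a read-through LRU on top of hashicorp/golang-lru/v2.
package main

import (
	"context"
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

type readThrough[K comparable, V any] struct {
	cache *lru.Cache[K, V]
	fetch func(ctx context.Context, key K) (V, error)
}

func newReadThrough[K comparable, V any](size int, fetch func(context.Context, K) (V, error)) (*readThrough[K, V], error) {
	c, err := lru.New[K, V](size)
	if err != nil {
		return nil, err
	}
	return &readThrough[K, V]{cache: c, fetch: fetch}, nil
}

// get returns the cached value, falling back to fetch on a miss.
func (r *readThrough[K, V]) get(ctx context.Context, key K) (V, error) {
	if v, ok := r.cache.Get(key); ok {
		return v, nil
	}
	v, err := r.fetch(ctx, key)
	if err != nil {
		var zero V
		return zero, err
	}
	r.cache.Add(key, v)
	return v, nil
}

func main() {
	rt, _ := newReadThrough(10, func(_ context.Context, epoch uint64) (string, error) {
		return fmt.Sprintf("netmap@%d", epoch), nil // pretend network read
	})
	v, _ := rt.get(context.Background(), 1)
	fmt.Println(v) // netmap@1 (fetched once, cached afterwards)
}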

View file

@ -3,11 +3,9 @@ package main
import (
"context"
"errors"
"sync"
"testing"
"time"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
@ -61,75 +59,3 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) {
type noopCacheMetricts struct{}
func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
type rawSrc struct{}
func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
node0 := netmapSDK.NodeInfo{}
node0.SetPublicKey([]byte{byte(1)})
node0.SetStatus(netmapSDK.Online)
node0.SetExternalAddresses("1", "0")
node0.SetNetworkEndpoints("1", "0")
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey([]byte{byte(1)})
node1.SetStatus(netmapSDK.Online)
node1.SetExternalAddresses("1", "0")
node1.SetNetworkEndpoints("1", "0")
return []netmapSDK.NodeInfo{node0, node1}, nil
}
func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
nm := netmapSDK.NetMap{}
nm.SetEpoch(1)
node0 := netmapSDK.NodeInfo{}
node0.SetPublicKey([]byte{byte(1)})
node0.SetStatus(netmapSDK.Maintenance)
node0.SetExternalAddresses("0")
node0.SetNetworkEndpoints("0")
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey([]byte{byte(1)})
node1.SetStatus(netmapSDK.Maintenance)
node1.SetExternalAddresses("0")
node1.SetNetworkEndpoints("0")
nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
return &nm, nil
}
type st struct{}
func (s *st) CurrentEpoch() uint64 {
return 1
}
func TestNetmapStorage(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
wg := sync.WaitGroup{}
cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
nm, err := cache.GetNetMapByEpoch(ctx, 1)
require.NoError(t, err)
require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
require.Eventually(t, func() bool {
nm, err := cache.GetNetMapByEpoch(ctx, 1)
require.NoError(t, err)
for _, node := range nm.Nodes() {
if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
node.NumberOfNetworkEndpoints() == 2) {
return false
}
}
return true
}, time.Second*5, time.Millisecond*10)
cancel()
wg.Wait()
}

View file

@ -108,7 +108,6 @@ type applicationConfiguration struct {
level string
destination string
timestamp bool
options []zap.Option
}
ObjectCfg struct {
@ -118,6 +117,7 @@ type applicationConfiguration struct {
EngineCfg struct {
errorThreshold uint32
shardPoolSize uint32
shards []shardCfg
lowMem bool
}
@ -233,14 +233,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
var opts []zap.Option
if loggerconfig.ToLokiConfig(c).Enabled {
opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
return lokiCore
})}
}
a.LoggerCfg.options = opts
// Object
@ -258,6 +250,7 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
@ -482,6 +475,7 @@ type shared struct {
// dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations.
type dynamicConfiguration struct {
logger *logger.Prm
pprof *httpComponent
metrics *httpComponent
}
@ -722,11 +716,16 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm := c.loggerPrm()
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
}
c.internals = initInternals(appCfg, log)
@ -894,6 +893,7 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option
opts = append(opts,
engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
@ -933,7 +933,6 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync),
writecache.WithLogger(c.log),
writecache.WithQoSLimiter(shCfg.limiter),
)
}
return writeCacheOpts
@ -1049,7 +1048,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
}
if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
}
var sh shardOptsWithID
@ -1079,23 +1077,26 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh
}
func (c *cfg) loggerPrm() (logger.Prm, error) {
var prm logger.Prm
// (re)init read configuration
err := prm.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
func (c *cfg) loggerPrm() *logger.Prm {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
}
err = prm.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
}
prm.PrependTimestamp = c.LoggerCfg.timestamp
prm.Options = c.LoggerCfg.options
return prm, nil
// (re)init read configuration
err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log level format: " + c.LoggerCfg.level)
}
err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
}
func (c *cfg) LocalAddress() network.AddressGroup {
@ -1335,7 +1336,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support
// Logger's dynamic reconfiguration approach
components := c.getComponents(ctx)
// Logger
logPrm := c.loggerPrm()
components := c.getComponents(ctx, logPrm)
// Object
c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@ -1373,17 +1378,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) getComponents(ctx context.Context) []dCmp {
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
var components []dCmp
components = append(components, dCmp{"logger", func() error {
prm, err := c.loggerPrm()
if err != nil {
return err
}
c.log.Reload(prm)
return nil
}})
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(ctx, c)
return nil

View file

@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) {
dir := t.TempDir()
cfgFileName := path.Join(dir, "cfg_01.yml")
cfgFileName0 := path.Join(dir, "cfg_00.json")
cfgFileName1 := path.Join(dir, "cfg_01.yml")
require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}

View file

@ -11,6 +11,10 @@ import (
const (
subsection = "storage"
// ShardPoolSizeDefault is a default value of routine pool size per-shard to
// process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil
}
// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
//
// Returns ShardPoolSizeDefault if the value is not a positive number.
func ShardPoolSize(c *config.Config) uint32 {
v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
if v > 0 {
return v
}
return ShardPoolSizeDefault
}
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
//
// Returns 0 if the the value is missing.

View file

@ -54,6 +54,7 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
})
@ -63,6 +64,7 @@ func TestEngineSection(t *testing.T) {
num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@ -168,10 +170,9 @@ func TestEngineSection(t *testing.T) {
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
Prohibited: true,
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
})
require.ElementsMatch(t, writeLimits.Tags,

View file

@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0)
if d < 0 {
d = 0
}
return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0)
if s < 0 {
s = 0
}
return s
}
// NoSync returns the value of "no_sync" config parameter.
@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
return max(s, 0)
if s < 0 {
s = 0
}
return s
}

View file

@ -84,7 +84,6 @@ type IOTagConfig struct {
Weight *float64
LimitOps *float64
ReservedOps *float64
Prohibited bool
}
func tags(c *config.Config) []IOTagConfig {
@ -120,13 +119,6 @@ func tags(c *config.Config) []IOTagConfig {
tagConfig.ReservedOps = &r
}
v = c.Value(strconv.Itoa(i) + ".prohibited")
if v != nil {
r, err := cast.ToBoolE(v)
panicOnErr(err)
tagConfig.Prohibited = r
}
result = append(result, tagConfig)
}
}

View file

@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0)
if d <= 0 {
d = 0
}
return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0)
if s <= 0 {
s = 0
}
return s
}

View file

@ -33,9 +33,6 @@ const (
// ContainerCacheSizeDefault represents the default size for the container cache.
ContainerCacheSizeDefault = 100
// PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
PollCandidatesTimeoutDefault = 20 * time.Second
)
var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@ -157,17 +154,3 @@ func FrostfsIDCacheSize(c *config.Config) uint32 {
}
return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
}
// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
// from "morph" section.
//
// Returns PollCandidatesTimeoutDefault if the value is not positive duration.
func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
v := config.DurationSafe(c.Sub(subsection).
Sub("netmap").Sub("candidates"), "poll_interval")
if v > 0 {
return v
}
return PollCandidatesTimeoutDefault
}

View file

@ -31,11 +31,12 @@ func Limits(c *config.Config) []LimitConfig {
break
}
if sc.Value("max_ops") == nil {
maxOps := config.IntSafe(sc, "max_ops")
if maxOps == 0 {
panic("no max operations for method group")
}
limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
limits = append(limits, LimitConfig{methods, maxOps})
}
return limits

View file

@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) {
})
t.Run("no max operations", func(t *testing.T) {
const path = "testdata/no_max_ops"
const path = "testdata/node"
fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) })
@ -50,28 +50,4 @@ func TestRPCSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
t.Run("zero max operations", func(t *testing.T) {
const path = "testdata/zero_max_ops"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(0))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
}

View file

@ -1,4 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=0
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

View file

@ -1,19 +0,0 @@
{
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
],
"max_ops": 0
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
}
}

View file

@ -1,9 +0,0 @@
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 0
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000

View file

@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) {
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
fatalOnErr(err)
c.cnrClient = wrap
c.shared.cnrClient = wrap
cnrSrc := cntClient.AsContainerSource(wrap)
@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}
c.frostfsidClient = frostfsIDSubjectProvider
c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) {
service := containerService.NewSignService(
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),

View file

@ -8,38 +8,38 @@ import (
func metricsComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
if c.metrics == nil {
c.metrics = new(httpComponent)
c.metrics.cfg = c
c.metrics.name = "metrics"
c.metrics.handler = metrics.Handler()
if c.dynamicConfiguration.metrics == nil {
c.dynamicConfiguration.metrics = new(httpComponent)
c.dynamicConfiguration.metrics.cfg = c
c.dynamicConfiguration.metrics.name = "metrics"
c.dynamicConfiguration.metrics.handler = metrics.Handler()
updated = true
}
// (re)init read configuration
enabled := metricsconfig.Enabled(c.appCfg)
if enabled != c.metrics.enabled {
c.metrics.enabled = enabled
if enabled != c.dynamicConfiguration.metrics.enabled {
c.dynamicConfiguration.metrics.enabled = enabled
updated = true
}
address := metricsconfig.Address(c.appCfg)
if address != c.metrics.address {
c.metrics.address = address
if address != c.dynamicConfiguration.metrics.address {
c.dynamicConfiguration.metrics.address = address
updated = true
}
dur := metricsconfig.ShutdownTimeout(c.appCfg)
if dur != c.metrics.shutdownDur {
c.metrics.shutdownDur = dur
if dur != c.dynamicConfiguration.metrics.shutdownDur {
c.dynamicConfiguration.metrics.shutdownDur = dur
updated = true
}
return c.metrics, updated
return c.dynamicConfiguration.metrics, updated
}
func enableMetricsSvc(c *cfg) {
c.metricsSvc.Enable()
c.shared.metricsSvc.Enable()
}
func disableMetricsSvc(c *cfg) {
c.metricsSvc.Disable()
c.shared.metricsSvc.Disable()
}

View file

@ -60,11 +60,10 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
}
if c.cfgMorph.cacheTTL < 0 {
netmapSource = newRawNetmapStorage(wrap)
netmapSource = wrap
} else {
// use RPC node as source of netmap (with caching)
netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg,
morphconfig.NetmapCandidatesPollInterval(c.appCfg))
netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap)
}
c.netMapSource = netmapSource

View file

@ -1,55 +0,0 @@
package main
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
type rawNetmapSource struct {
client *netmapClient.Client
}
func newRawNetmapStorage(client *netmapClient.Client) netmap.Source {
return &rawNetmapSource{
client: client,
}
}
func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
nm, err := s.client.GetNetMap(ctx, diff)
if err != nil {
return nil, err
}
candidates, err := s.client.GetCandidates(ctx)
if err != nil {
return nil, err
}
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
return nm, nil
}
func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
nm, err := s.client.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return nil, err
}
candidates, err := s.client.GetCandidates(ctx)
if err != nil {
return nil, err
}
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
return nm, nil
}
func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) {
return s.client.Epoch(ctx)
}

View file

@ -16,6 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@ -171,10 +172,12 @@ func initObjectService(c *cfg) {
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
apeSvc := createAPEService(c, &irFetcher, splitSvc)
apeSvc := createAPEService(c, splitSvc)
aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
var commonSvc objectService.Common
commonSvc.Init(&c.internals, apeSvc)
commonSvc.Init(&c.internals, aclSvc)
respSvc := objectService.NewResponseService(
&commonSvc,
@ -186,9 +189,9 @@ func initObjectService(c *cfg) {
respSvc,
)
c.metricsSvc = objectService.NewMetricCollector(
c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc)
@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
})
}
func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
@ -426,19 +429,28 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
)
}
func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
return v2.New(
apeSvc,
c.netMapSource,
irFetcher,
c.cfgObject.cnrSource,
v2.WithLogger(c.log),
)
}
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
c.frostfsidClient,
c.shared.frostfsidClient,
c.netMapSource,
c.cfgNetmap.state,
c.cfgObject.cnrSource,
c.binPublicKey,
),
objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
splitSvc,
)
}

View file

@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) {
func pprofComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
if c.pprof == nil {
c.pprof = new(httpComponent)
c.pprof.cfg = c
c.pprof.name = "pprof"
c.pprof.handler = httputil.Handler()
c.pprof.preReload = tuneProfilers
if c.dynamicConfiguration.pprof == nil {
c.dynamicConfiguration.pprof = new(httpComponent)
c.dynamicConfiguration.pprof.cfg = c
c.dynamicConfiguration.pprof.name = "pprof"
c.dynamicConfiguration.pprof.handler = httputil.Handler()
c.dynamicConfiguration.pprof.preReload = tuneProfilers
updated = true
}
// (re)init read configuration
enabled := profilerconfig.Enabled(c.appCfg)
if enabled != c.pprof.enabled {
c.pprof.enabled = enabled
if enabled != c.dynamicConfiguration.pprof.enabled {
c.dynamicConfiguration.pprof.enabled = enabled
updated = true
}
address := profilerconfig.Address(c.appCfg)
if address != c.pprof.address {
c.pprof.address = address
if address != c.dynamicConfiguration.pprof.address {
c.dynamicConfiguration.pprof.address = address
updated = true
}
dur := profilerconfig.ShutdownTimeout(c.appCfg)
if dur != c.pprof.shutdownDur {
c.pprof.shutdownDur = dur
if dur != c.dynamicConfiguration.pprof.shutdownDur {
c.dynamicConfiguration.pprof.shutdownDur = dur
updated = true
}
return c.pprof, updated
return c.dynamicConfiguration.pprof, updated
}
func tuneProfilers(c *cfg) {

View file

@ -43,14 +43,11 @@ func initQoSService(c *cfg) {
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
if err != nil {
s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
@ -73,36 +70,26 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return ctx
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default:
s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, publicKey) {
return true
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return false
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), publicKey) {
return true
}
}
return false
}

View file

@ -1,226 +0,0 @@
package main
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSService_Client(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag client defined", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func TestQoSService_Internal(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
}
func TestQoSService_Critical(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
}
func TestQoSService_NetmapGetError(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
s.netmapSource = &utilTesting.TestNetmapSource{}
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
nmSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
reqSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedCritSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedIntSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
var node netmap.NodeInfo
node.SetPublicKey(nmSigner.PublicKey().Bytes())
nm := &netmap.NetMap{}
nm.SetEpoch(100)
nm.SetNodes([]netmap.NodeInfo{node})
return &cfgQoSService{
logger: test.NewLogger(t),
netmapSource: &utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
100: nm,
},
CurrentEpoch: 100,
},
allowedCriticalPubs: [][]byte{
allowedCritSigner.PublicKey().Bytes(),
},
allowedInternalPubs: [][]byte{
allowedIntSigner.PublicKey().Bytes(),
},
},
&testQoSServicePublicKeys{
NetmapNode: nmSigner.PublicKey().Bytes(),
Request: reqSigner.PublicKey().Bytes(),
Internal: allowedIntSigner.PublicKey().Bytes(),
Critical: allowedCritSigner.PublicKey().Bytes(),
}
}
type testQoSServicePublicKeys struct {
NetmapNode []byte
Request []byte
Internal []byte
Critical []byte
}

View file

@ -51,9 +51,9 @@ func initTreeService(c *cfg) {
c.treeService = tree.New(
tree.WithContainerSource(cnrSource{
src: c.cfgObject.cnrSource,
cli: c.cnrClient,
cli: c.shared.cnrClient,
}),
tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
tree.WithLogger(c.log),

View file

@ -1,6 +1,7 @@
package main
import (
"os"
"path/filepath"
"testing"
@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
require.NoError(t, err)
})
})
t.Run("mainnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
t.Run("testnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
}

View file

@ -97,6 +97,7 @@ FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor
@ -180,7 +181,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0

View file

@ -158,6 +158,7 @@
]
},
"storage": {
"shard_pool_size": 15,
"shard_ro_error_threshold": 100,
"shard": {
"0": {
@ -252,8 +253,7 @@
{
"tag": "policer",
"weight": 5,
"limit_ops": 25000,
"prohibited": true
"limit_ops": 25000
}
]
},

View file

@ -95,9 +95,6 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
ape_chain_cache_size: 100000
netmap:
candidates:
poll_interval: 20s
apiclient:
dial_timeout: 15s # timeout for FrostFS API client connection
@ -138,6 +135,7 @@ rpc:
storage:
# note: shard configuration can be omitted for relay node (see `node.relay`)
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard:
@ -151,7 +149,7 @@ storage:
flush_worker_count: 30 # number of write-cache flusher threads
metabase:
perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200
max_batch_delay: 20ms
@ -164,13 +162,13 @@ storage:
blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS
gc:
@ -252,7 +250,6 @@ storage:
- tag: policer
weight: 5
limit_ops: 25000
prohibited: true
write:
max_running_ops: 1000
max_waiting_ops: 100
@ -294,7 +291,7 @@ storage:
pilorama:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
perm: 0o644 # permission to use for the database file and intermediate directories
perm: 0644 # permission to use for the database file and intermediate directories
tracing:
enabled: true

28
config/mainnet/README.md Normal file
View file

@ -0,0 +1,28 @@
# N3 Mainnet Storage node configuration
Here is a template for a simple storage node configuration in N3 Mainnet.
Make sure to specify correct values instead of the `<...>` placeholders.
Do not change the `contracts` section. Run the latest frostfs-node release with
the finished config: `frostfs-node -c config.yml`.
To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
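As an illustration only, mirroring the testnet instructions in this repository, a deposit can be made by transferring GAS to that address with `neo-go`; the RPC endpoint, wallet file, and sender address below are placeholders, not actual values.
```
neo-go wallet nep17 transfer -w wallet.json -r <n3-mainnet-rpc-endpoint> \
--from <your-n3-address> \
--to NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk \
--token GAS \
--amount 1
```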
## Tips
Use `grpcs://` scheme in the announced address if you enable TLS in grpc server.
```yaml
node:
addresses:
- grpcs://frostfs.my.org:8080
grpc:
num: 1
0:
endpoint: frostfs.my.org:8080
tls:
enabled: true
certificate: /path/to/cert
key: /path/to/key
```

70
config/mainnet/config.yml Normal file
View file

@ -0,0 +1,70 @@
node:
wallet:
path: <path/to/wallet>
address: <address-in-wallet>
password: <password>
addresses:
- <announced.address:port>
attribute_0: UN-LOCODE:<XX YYY>
attribute_1: Price:100000
attribute_2: User-Agent:FrostFS\/0.9999
grpc:
num: 1
0:
endpoint: <listen.local.address:port>
tls:
enabled: false
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/path/metabase
perm: 0600
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m
logger:
level: info
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
object:
put:
remote_pool_size: 100
local_pool_size: 100
morph:
rpc_endpoint:
- wss://rpc1.morph.frostfs.info:40341/ws
- wss://rpc2.morph.frostfs.info:40341/ws
- wss://rpc3.morph.frostfs.info:40341/ws
- wss://rpc4.morph.frostfs.info:40341/ws
- wss://rpc5.morph.frostfs.info:40341/ws
- wss://rpc6.morph.frostfs.info:40341/ws
- wss://rpc7.morph.frostfs.info:40341/ws
dial_timeout: 20s
contracts:
balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1

129
config/testnet/README.md Normal file
View file

@ -0,0 +1,129 @@
# N3 Testnet Storage node configuration
There is a prepared configuration for NeoFS Storage Node deployment in
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
docker image and run it with docker-compose.
## Build image
The prepared **frostfs-storage-testnet** image is available on Docker Hub.
However, if you need to rebuild it for some reason, run
the `make image-storage-testnet` command.
```
$ make image-storage-testnet
...
Successfully built ab0557117b02
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
```
## Deploy node
To run a storage node in the N3 Testnet environment, you should deposit GAS assets,
update the docker-compose file, and start the node.
### Deposit
The Storage Node owner should deposit GAS to the NeoFS smart contract. The deposit generates a
bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send the bootstrap tx.
First, obtain GAS in the N3 Testnet chain. You can do that with the
[faucet](https://neowish.ngd.network) service.
Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
You can provide a script hash in the `data` argument of the transfer tx to make a
deposit to a specified account. Otherwise, the deposit is made to the tx sender.
The NeoFS contract script hash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
See a deposit example with `neo-go`.
```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
--token GAS \
--amount 1
```
### Configure
Next, configure the `node_config.env` file. Change the endpoint values. Both
should contain your **public** IP.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
```
Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
attribute.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```
You can validate the UN/LOCODE attribute against the
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.
```
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
Country: Russia
Location: Saint Petersburg (ex Leningrad)
Continent: Europe
Subdivision: [SPE] Sankt-Peterburg
Coordinates: 59.53, 30.15
```
It is recommended to pass the node's key as a file. To do so, convert your wallet
WIF to a 32-byte hex string (via `frostfs-cli`, for example) and save it to a file.
```
// Print WIF in a 32-byte hex format
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
// Save 32-byte hex into a file
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
```
Then, specify the path to this file in `docker-compose.yml`:
```yaml
volumes:
- frostfs_storage:/storage
- ./my_wallet.key:/node.key
```
NeoFS objects will be stored on your machine. By default, docker-compose
is configured to store objects in the named docker volume `frostfs_storage`. You can
instead specify a directory on the filesystem to store objects in.
```yaml
volumes:
- /home/username/frostfs/rc3/storage:/storage
- ./my_wallet.key:/node.key
```
### Start
Run the node with the `docker-compose up` command and stop it with `docker-compose down`.
### Debug
To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
the log, set the log level to debug with this env:
```yaml
environment:
- NEOFS_LOGGER_LEVEL=debug
```

52
config/testnet/config.yml Normal file
View file

@ -0,0 +1,52 @@
logger:
level: info
morph:
rpc_endpoint:
- wss://rpc01.morph.testnet.frostfs.info:51331/ws
- wss://rpc02.morph.testnet.frostfs.info:51331/ws
- wss://rpc03.morph.testnet.frostfs.info:51331/ws
- wss://rpc04.morph.testnet.frostfs.info:51331/ws
- wss://rpc05.morph.testnet.frostfs.info:51331/ws
- wss://rpc06.morph.testnet.frostfs.info:51331/ws
- wss://rpc07.morph.testnet.frostfs.info:51331/ws
dial_timeout: 20s
contracts:
balance: e0420c216003747626670d1424569c17c79015bf
container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
netmap: d4b331639799e2958d4bc5b711b469d79de94e01
node:
key: /node.key
attribute_0: Deployed:SelfHosted
attribute_1: User-Agent:FrostFS\/0.9999
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/metabase
perm: 0777
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m

View file

@ -148,19 +148,15 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
netmap:
candidates:
poll_interval: 20s
```
| Parameter | Type | Default value | Description |
|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
| `netmap.candidates.poll_interval` | `duration` | `20s` | Timeout to set up frequency of merge candidates to netmap with netmap in local cache. |
| Parameter | Type | Default value | Description |
| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@ -174,6 +170,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
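For illustration only, a minimal `storage` section combining these top-level parameters could look like the sketch below; the values mirror the example config in this repository and are not recommendations.
```yaml
storage:
  shard_pool_size: 15           # per-shard worker pool size for PUT operations
  shard_ro_error_threshold: 100 # storage errors tolerated before the shard switches mode
  low_mem: false
  shard:
    0:
      # per-shard configuration, see the Shard subsection
```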
@ -213,7 +210,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
perm: 0o644
perm: 0644
size: 4194304
depth: 1
width: 4
@ -273,7 +270,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
perm: 0o644
perm: 0644
max_batch_size: 200
max_batch_delay: 20ms
```
@ -363,7 +360,6 @@ limits:
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
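As a sketch, a single read tag configured with these parameters might look as follows; the tag name and values are taken from the example shard config above and are purely illustrative.
```yaml
limits:
  read:
    tags:
      - tag: policer
        weight: 5
        limit_ops: 25000
```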
# `node` section

14
go.mod
View file

@ -1,18 +1,18 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
go 1.23
go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/VictoriaMetrics/easyproto v0.1.4

24
go.sum
View file

@ -1,25 +1,25 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d h1:ZLKDupw362Ciing7kdIZhDYGMyo2QZyJ6sS/8X9QWJ0=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d/go.mod h1:2PWt5GwJTnhjHp+mankcfCeAJBMn7puxPm+RS+lliVk=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 h1:QnAt5b2R6+hQthMOIn5ECfLAlVD8IAE5JRm1NCCOmuE=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=

View file

@ -1,25 +1,9 @@
package assert
import (
"fmt"
"strings"
)
import "strings"
func True(cond bool, details ...string) {
if !cond {
panic(strings.Join(details, " "))
}
}
func False(cond bool, details ...string) {
if cond {
panic(strings.Join(details, " "))
}
}
func NoError(err error, details ...string) {
if err != nil {
content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
panic(content)
}
}

View file

@ -252,7 +252,6 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
ShardCouldNotFindObject = "could not find object"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
@ -512,8 +511,5 @@ const (
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
FailedToUpdateNetmapCandidates = "update netmap candidates failed"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
)

View file

@ -23,7 +23,6 @@ const (
policerSubsystem = "policer"
commonCacheSubsystem = "common_cache"
multinetSubsystem = "multinet"
qosSubsystem = "qos"
successLabel = "success"
shardIDLabel = "shard_id"
@ -44,7 +43,6 @@ const (
hitLabel = "hit"
cacheLabel = "cache"
sourceIPLabel = "source_ip"
ioTagLabel = "io_tag"
readWriteMode = "READ_WRITE"
readOnlyMode = "READ_ONLY"

View file

@ -26,7 +26,6 @@ type NodeMetrics struct {
morphCache *morphCacheMetrics
log logger.LogMetrics
multinet *multinetMetrics
qos *QoSMetrics
// nolint: unused
appInfo *ApplicationInfo
}
@ -56,7 +55,6 @@ func NewNodeMetrics() *NodeMetrics {
log: logger.NewLogMetrics(namespace),
appInfo: NewApplicationInfo(misc.Version),
multinet: newMultinetMetrics(namespace),
qos: newQoSMetrics(),
}
}
@ -128,7 +126,3 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
return m.multinet
}
func (m *NodeMetrics) QoSMetrics() *QoSMetrics {
return m.qos
}

Some files were not shown because too many files have changed in this diff.