diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile new file mode 100644 index 000000000..4234de160 --- /dev/null +++ b/.ci/Jenkinsfile @@ -0,0 +1,81 @@ +def golang = ['1.23', '1.24'] +def golangDefault = "golang:${golang.last()}" + +async { + + for (version in golang) { + def go = version + + task("test/go${go}") { + container("golang:${go}") { + sh 'make test' + } + } + + task("build/go${go}") { + container("golang:${go}") { + for (app in ['cli', 'node', 'ir', 'adm', 'lens']) { + sh """ + make bin/frostfs-${app} + bin/frostfs-${app} --version + """ + } + } + } + } + + task('test/race') { + container(golangDefault) { + sh 'make test GOFLAGS="-count=1 -race"' + } + } + + task('lint') { + container(golangDefault) { + sh 'make lint-install lint' + } + } + + task('staticcheck') { + container(golangDefault) { + sh 'make staticcheck-install staticcheck-run' + } + } + + task('gopls') { + container(golangDefault) { + sh 'make gopls-install gopls-run' + } + } + + task('gofumpt') { + container(golangDefault) { + sh ''' + make fumpt-install + make fumpt + git diff --exit-code --quiet + ''' + } + } + + task('vulncheck') { + container(golangDefault) { + sh ''' + go install golang.org/x/vuln/cmd/govulncheck@latest + govulncheck ./... + ''' + } + } + + task('pre-commit') { + dockerfile(""" + FROM ${golangDefault} + RUN apt update && \ + apt install -y --no-install-recommends pre-commit + """) { + withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { + sh 'pre-commit run --color=always --hook-stage=manual --all-files' + } + } + } +} diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml index 9129d136e..d568b9607 100644 --- a/.forgejo/workflows/build.yml +++ b/.forgejo/workflows/build.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] steps: - uses: actions/checkout@v3 diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml index 7c5af8410..190d7764a 100644 --- a/.forgejo/workflows/dco.yml +++ b/.forgejo/workflows/dco.yml @@ -13,7 +13,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' - name: Run commit format checker uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml index b27e7a39a..c2e293175 100644 --- a/.forgejo/workflows/pre-commit.yml +++ b/.forgejo/workflows/pre-commit.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.23 + go-version: 1.24 - name: Set up Python run: | apt update diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml index 4f1bebe61..f3f5432ce 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install linters @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] fail-fast: false steps: - uses: actions/checkout@v3 @@ -53,7 +53,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' cache: true - name: Run tests @@ -68,7 +68,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install staticcheck @@ -104,7 +104,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: 
'1.24' cache: true - name: Install gofumpt diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 7c89a3555..bc94792d8 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23.6' + go-version: '1.24' + check-latest: true - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.golangci.yml b/.golangci.yml index d83f36de8..e3ec09f60 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,95 +1,107 @@ -# This file contains all available configuration options -# with their default values. - -# options for analysis running +version: "2" run: - # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 20m - - # include test files or not, default is true tests: false - -# output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" formats: - - format: tab - -# all available settings of specific linters -linters-settings: - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: true - govet: - # report about shadowed variables - check-shadowing: false - staticcheck: - checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. - funlen: - lines: 80 # default 60 - statements: 60 # default 40 - gocognit: - min-complexity: 40 # default 30 - importas: - no-unaliased: true - no-extra-aliases: false - alias: - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - unused: - field-writes-are-uses: false - exported-fields-are-used: false - local-variables-are-used: false - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - target-methods : ["reportFlushError", "reportError"] - disable-packages: ["codes", "err", "res","exec"] - constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - + tab: + path: stdout + colors: false linters: + default: none enable: - # mandatory linters - - govet - - revive - - # some default golangci-lint linters - - errcheck - - gosimple - - godot - - ineffassign - - staticcheck - - typecheck - - unused - - # extra linters - bidichk - - durationcheck - - exhaustive - - copyloopvar - - gofmt - - goimports - - misspell - - predeclared - - reassign - - whitespace - containedctx + - contextcheck + - copyloopvar + - durationcheck + - errcheck + - exhaustive - funlen - gocognit - - contextcheck + - gocritic + - godot - importas - - truecloudlab-linters - - perfsprint - - testifylint - - protogetter + - ineffassign - intrange - - tenv + - misspell + - perfsprint + - predeclared + - protogetter + - reassign + - revive + - staticcheck + - testifylint + - truecloudlab-linters - unconvert - unparam - disable-all: true - fast: false + - unused + - usetesting + - whitespace + settings: + exhaustive: + default-signifies-exhaustive: true + funlen: + lines: 80 + statements: 60 + gocognit: + min-complexity: 40 + gocritic: + disabled-checks: + - ifElseChain + importas: + alias: + - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object + alias: objectSDK + no-unaliased: true + no-extra-aliases: false + staticcheck: + checks: + - all + - -QF1002 + unused: + field-writes-are-uses: false + 
exported-fields-are-used: false + local-variables-are-used: false + custom: + truecloudlab-linters: + path: bin/linters/external_linters.so + original-url: git.frostfs.info/TrueCloudLab/linters.git + settings: + noliteral: + constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs + disable-packages: + - codes + - err + - res + - exec + target-methods: + - reportFlushError + - reportError + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - goimports + settings: + gci: + sections: + - standard + - default + custom-order: true + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/Makefile b/Makefile index 497dce115..575eaae6f 100755 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ #!/usr/bin/make -f SHELL = bash +.SHELLFLAGS = -euo pipefail -c REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") @@ -7,16 +8,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" -GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.62.2 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 +GO_VERSION ?= 1.23 +LINT_VERSION ?= 2.0.2 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 ifeq ($(shell uname), Linux) PROTOC_OS_VERSION=linux-x86_64 endif -STATICCHECK_VERSION ?= 2024.1.1 +STATICCHECK_VERSION ?= 2025.1.1 ARCH = amd64 BIN = bin @@ -42,7 +43,7 @@ GOFUMPT_VERSION ?= v0.7.0 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) -GOPLS_VERSION ?= v0.15.1 +GOPLS_VERSION ?= v0.17.1 GOPLS_DIR ?= $(abspath $(BIN))/gopls GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION) GOPLS_TEMP_FILE := $(shell mktemp) @@ -115,7 +116,7 @@ protoc: # Install protoc protoc-install: @rm -rf $(PROTOBUF_DIR) - @mkdir $(PROTOBUF_DIR) + @mkdir -p $(PROTOBUF_DIR) @echo "⇒ Installing protoc... " @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @@ -169,7 +170,7 @@ imports: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir $(GOFUMPT_DIR) + @mkdir -p $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt @@ -186,21 +187,44 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... 
+# Install Gerrit commit-msg hook +review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks +review-install: + @git config remote.review.url \ + || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node + @mkdir -p $(GIT_HOOK_DIR)/ + @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg + @chmod +x $(GIT_HOOK_DIR)/commit-msg + @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg + @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg + +# Create a PR in Gerrit +review: BRANCH ?= master +review: + @git push review HEAD:refs/for/$(BRANCH) \ + --push-option r=e.stratonikov@yadro.com \ + --push-option r=d.stepanov@yadro.com \ + --push-option r=an.nikiforov@yadro.com \ + --push-option r=a.arifullin@yadro.com \ + --push-option r=ekaterina.lebedeva@yadro.com \ + --push-option r=a.savchuk@yadro.com \ + --push-option r=a.chuprov@yadro.com + # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: +lint-install: $(BIN) @rm -rf $(OUTPUT_LINT_DIR) - @mkdir $(OUTPUT_LINT_DIR) + @mkdir -p $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: @@ -212,7 +236,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir $(STATICCHECK_DIR) + @mkdir -p $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -225,7 +249,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir $(GOPLS_DIR) + @mkdir -p $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index 87692d013..f194e97f5 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -16,9 +16,16 @@ const ( EndpointFlagDesc = "N3 RPC node endpoint" EndpointFlagShort = "r" + WalletPath = "wallet" + WalletPathShorthand = "w" + WalletPathUsage = "Path to the wallet" + AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" + AdminWalletPath = "wallet-admin" + AdminWalletUsage = "Path to the admin wallet" + LocalDumpFlag = "local-dump" ProtoConfigPath = "protocol" ContractsInitFlag = "contracts" diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go new file mode 100644 index 000000000..d67b70d2a --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/root.go @@ -0,0 +1,15 @@ +package maintenance + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" + "github.com/spf13/cobra" +) + +var RootCmd = &cobra.Command{ + Use: "maintenance", + Short: "Section for maintenance commands", +} + +func init() { + 
RootCmd.AddCommand(zombie.Cmd) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go new file mode 100644 index 000000000..1b66889aa --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go @@ -0,0 +1,70 @@ +package zombie + +import ( + "crypto/ecdsa" + "fmt" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/cli/flags" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { + keyDesc := viper.GetString(walletFlag) + if keyDesc == "" { + return &nodeconfig.Key(appCfg).PrivateKey + } + data, err := os.ReadFile(keyDesc) + commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) + + priv, err := keys.NewPrivateKeyFromBytes(data) + if err != nil { + w, err := wallet.NewWalletFromFile(keyDesc) + commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) + return fromWallet(cmd, w, viper.GetString(addressFlag)) + } + return &priv.PrivateKey +} + +func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { + var ( + addr util.Uint160 + err error + ) + + if addrStr == "" { + addr = w.GetChangeAddress() + } else { + addr, err = flags.ParseAddress(addrStr) + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) + } + + acc := w.GetAccount(addr) + if acc == nil { + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) + } + + pass, err := getPassword() + commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) + + commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) + + return &acc.PrivateKey().PrivateKey +} + +func getPassword() (string, error) { + // this check allows empty passwords + if viper.IsSet("password") { + return viper.GetString("password"), nil + } + + return input.ReadPassword("Enter password > ") +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go new file mode 100644 index 000000000..f73f33db9 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go @@ -0,0 +1,31 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func list(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + var containerID *cid.ID + if cidStr, _ := 
cmd.Flags().GetString(cidFlag); cidStr != "" { + containerID = &cid.ID{} + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + } + + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { + if containerID != nil && a.Container() != *containerID { + return nil + } + cmd.Println(a.EncodeToString()) + return nil + })) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go new file mode 100644 index 000000000..cd3a64499 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go @@ -0,0 +1,46 @@ +package zombie + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "github.com/spf13/cobra" +) + +func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { + addresses := morphconfig.RPCEndpoint(appCfg) + if len(addresses) == 0 { + commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) + } + key := nodeconfig.Key(appCfg) + cli, err := client.New(cmd.Context(), + key, + client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), + client.WithEndpoints(addresses...), + client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), + ) + commonCmd.ExitOnErr(cmd, "create morph client: %w", err) + return cli +} + +func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { + hs, err := morph.NNSContractAddress(client.NNSContainerContractName) + commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err) + cc, err := cntClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) + return cc +} + +func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { + hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) + commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) + cli, err := netmapClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) + return cli +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go new file mode 100644 index 000000000..27f83aec7 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go @@ -0,0 +1,154 @@ +package zombie + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "sync" + + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +type quarantine struct { + // mtx protects current field. + mtx sync.Mutex + current int + trees []*fstree.FSTree +} + +func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { + var paths []string + for _, sh := range engineInfo.Shards { + var storagePaths []string + for _, st := range sh.BlobStorInfo.SubStorages { + storagePaths = append(storagePaths, st.Path) + } + if len(storagePaths) == 0 { + continue + } + paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) + } + q, err := newQuarantine(paths) + commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) + return q +} + +func commonPath(paths []string) string { + if len(paths) == 0 { + return "" + } + if len(paths) == 1 { + return paths[0] + } + minLen := math.MaxInt + for _, p := range paths { + if len(p) < minLen { + minLen = len(p) + } + } + + var sb strings.Builder + for i := range minLen { + for _, path := range paths[1:] { + if paths[0][i] != path[i] { + return sb.String() + } + } + sb.WriteByte(paths[0][i]) + } + return sb.String() +} + +func newQuarantine(paths []string) (*quarantine, error) { + var q quarantine + for i := range paths { + f := fstree.New( + fstree.WithDepth(1), + fstree.WithDirNameLen(1), + fstree.WithPath(paths[i]), + fstree.WithPerm(os.ModePerm), + ) + if err := f.Open(mode.ComponentReadWrite); err != nil { + return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) + } + if err := f.Init(); err != nil { + return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) + } + q.trees = append(q.trees, f) + } + return &q, nil +} + +func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { + for i := range q.trees { + res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) + if err != nil { + continue + } + return res.Object, nil + } + return nil, &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Delete(ctx context.Context, a oid.Address) error { + for i := range q.trees { + _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) + if err != nil { + continue + } + return nil + } + return &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { + data, err := obj.Marshal() + if err != nil { + return err + } + + var prm common.PutPrm + prm.Address = objectcore.AddressOf(obj) + prm.Object = obj + prm.RawData = data + + q.mtx.Lock() + current := q.current + q.current = (q.current + 1) % len(q.trees) + q.mtx.Unlock() + + _, err = q.trees[current].Put(ctx, prm) + return err +} + +func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { + var prm common.IteratePrm + prm.Handler = func(elem common.IterationElement) error { + return f(elem.Address) + } + for i := range q.trees { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + _, err := q.trees[i].Iterate(ctx, prm) + if err != nil { + return err + } + } + return nil +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go new file mode 100644 index 000000000..0b8f2f172 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go @@ -0,0 +1,55 @@ +package zombie + +import ( + "errors" + + 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func remove(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + removeObject(cmd, q, addr) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { + if addr.Container() != containerID { + return nil + } + removeObject(cmd, q, addr) + return nil + })) + } +} + +func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { + err := q.Delete(cmd.Context(), addr) + if errors.Is(err, new(apistatus.ObjectNotFound)) { + return + } + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go new file mode 100644 index 000000000..f179c7c2d --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go @@ -0,0 +1,69 @@ +package zombie + +import ( + "crypto/sha256" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func restore(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: 
%w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + restoreObject(cmd, storageEngine, q, addr, cnrCli) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { + if addr.Container() != containerID { + return nil + } + restoreObject(cmd, storageEngine, q, addr, cnrCli) + return nil + })) + } +} + +func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) { + obj, err := q.Get(cmd.Context(), addr) + commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err) + rawCID := make([]byte, sha256.Size) + + cid := addr.Container() + cid.Encode(rawCID) + cnr, err := cnrCli.Get(cmd.Context(), rawCID) + commonCmd.ExitOnErr(cmd, "get container: %w", err) + + putPrm := engine.PutPrm{ + Object: obj, + IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value), + } + commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm)) + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr)) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go new file mode 100644 index 000000000..c8fd9e5e5 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go @@ -0,0 +1,123 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + flagBatchSize = "batch-size" + flagBatchSizeUsage = "Objects iteration batch size" + cidFlag = "cid" + cidFlagUsage = "Container ID" + oidFlag = "oid" + oidFlagUsage = "Object ID" + walletFlag = "wallet" + walletFlagShorthand = "w" + walletFlagUsage = "Path to the wallet or binary key" + addressFlag = "address" + addressFlagUsage = "Address of wallet account" + moveFlag = "move" + moveFlagUsage = "Move objects from storage engine to quarantine" +) + +var ( + Cmd = &cobra.Command{ + Use: "zombie", + Short: "Zombie objects related commands", + } + scanCmd = &cobra.Command{ + Use: "scan", + Short: "Scan storage engine for zombie objects and move them to quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) + _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag)) + _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize)) + _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag)) + }, + Run: scan, + } + listCmd = &cobra.Command{ + Use: "list", + Short: "List zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + }, + Run: list, + } + restoreCmd = &cobra.Command{ + Use: "restore", + Short: "Restore zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, 
cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: restore, + } + removeCmd = &cobra.Command{ + Use: "remove", + Short: "Remove zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: remove, + } +) + +func init() { + initScanCmd() + initListCmd() + initRestoreCmd() + initRemoveCmd() +} + +func initScanCmd() { + Cmd.AddCommand(scanCmd) + + scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) + scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage) + scanCmd.Flags().String(addressFlag, "", addressFlagUsage) + scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage) +} + +func initListCmd() { + Cmd.AddCommand(listCmd) + + listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + listCmd.Flags().String(cidFlag, "", cidFlagUsage) +} + +func initRestoreCmd() { + Cmd.AddCommand(restoreCmd) + + restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + restoreCmd.Flags().String(cidFlag, "", cidFlagUsage) + restoreCmd.Flags().String(oidFlag, "", oidFlagUsage) +} + +func initRemoveCmd() { + Cmd.AddCommand(removeCmd) + + removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + removeCmd.Flags().String(cidFlag, "", cidFlagUsage) + removeCmd.Flags().String(oidFlag, "", oidFlagUsage) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go new file mode 100644 index 000000000..268ec4911 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go @@ -0,0 +1,281 @@ +package zombie + +import ( + "context" + "crypto/ecdsa" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +func scan(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + batchSize, _ := cmd.Flags().GetUint32(flagBatchSize) + if batchSize == 0 { + commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value")) + } + move, _ := cmd.Flags().GetBool(moveFlag) + + storageEngine := newEngine(cmd, appCfg) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + nmCli := createNetmapClient(cmd, morphClient) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + pk := getPrivateKey(cmd, appCfg) + + epoch, err := nmCli.Epoch(cmd.Context()) + commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err) + + nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch) + commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err) + + cmd.Printf("Epoch: %d\n", nm.Epoch()) + cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes())) + + ps := &processStatus{ + statusCount: make(map[status]uint64), + } + + stopCh := make(chan struct{}) + start := time.Now() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tick := time.NewTicker(time.Second) + defer tick.Stop() + for { + select { + case <-cmd.Context().Done(): + return + case <-stopCh: + return + case <-tick.C: + fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start)) + } + } + }() + go func() { + defer wg.Done() + err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move) + close(stopCh) + }() + wg.Wait() + commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err) + + cmd.Println() + cmd.Println("Status description:") + cmd.Println("undefined -- nothing is clear") + cmd.Println("found -- object is found in cluster") + cmd.Println("quarantine -- object is not found in cluster") + cmd.Println() + for status, count := range ps.statusCount { + cmd.Printf("Status: %s, Count: %d\n", status, count) + } +} + +type status string + +const ( + statusUndefined status = "undefined" + statusFound status = "found" + statusQuarantine status = "quarantine" +) + +func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) { + rawCID := make([]byte, sha256.Size) + cid := obj.Address.Container() + cid.Encode(rawCID) + + cnr, err := cnrCli.Get(ctx, rawCID) + if err != nil { + var errContainerNotFound *apistatus.ContainerNotFound + if errors.As(err, &errContainerNotFound) { + // Policer will deal with this object. + return statusFound, nil + } + return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err) + } + nm, err := nmCli.NetMap(ctx) + if err != nil { + return statusUndefined, fmt.Errorf("read netmap from morph: %w", err) + } + + nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID) + if err != nil { + // Not enough nodes, check all netmap nodes. 
+ nodes = append([][]netmap.NodeInfo{}, nm.Nodes()) + } + + objID := obj.Address.Object() + cnrID := obj.Address.Container() + local := true + raw := false + if obj.ECInfo != nil { + objID = obj.ECInfo.ParentID + local = false + raw = true + } + prm := clientSDK.PrmObjectHead{ + ObjectID: &objID, + ContainerID: &cnrID, + Local: local, + Raw: raw, + } + + var ni clientCore.NodeInfo + for i := range nodes { + for j := range nodes[i] { + if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil { + return statusUndefined, fmt.Errorf("parse node info: %w", err) + } + c, err := cc.Get(ni) + if err != nil { + continue + } + res, err := c.ObjectHead(ctx, prm) + if err != nil { + var errECInfo *objectSDK.ECInfoError + if raw && errors.As(err, &errECInfo) { + return statusFound, nil + } + continue + } + if err := apistatus.ErrFromStatus(res.Status()); err != nil { + continue + } + return statusFound, nil + } + } + + if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 { + return statusFound, nil + } + return statusQuarantine, nil +} + +func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus, + appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool, +) error { + cc := cache.NewSDKClientCache(cache.ClientCacheOpts{ + DialTimeout: apiclientconfig.DialTimeout(appCfg), + StreamTimeout: apiclientconfig.StreamTimeout(appCfg), + ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), + Key: pk, + AllowExternal: apiclientconfig.AllowExternal(appCfg), + }) + ctx := cmd.Context() + + var cursor *engine.Cursor + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var prm engine.ListWithCursorPrm + prm.WithCursor(cursor) + prm.WithCount(batchSize) + + res, err := storageEngine.ListWithCursor(ctx, prm) + if err != nil { + if errors.Is(err, engine.ErrEndOfListing) { + return nil + } + return fmt.Errorf("list with cursor: %w", err) + } + + cursor = res.Cursor() + addrList := res.AddressList() + eg, egCtx := errgroup.WithContext(ctx) + eg.SetLimit(int(batchSize)) + + for i := range addrList { + addr := addrList[i] + eg.Go(func() error { + result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr) + if err != nil { + return fmt.Errorf("check object %s status: %w", addr.Address, err) + } + ps.add(result) + + if !move && result == statusQuarantine { + cmd.Println(addr) + return nil + } + + if result == statusQuarantine { + return moveToQuarantine(egCtx, storageEngine, q, addr.Address) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return fmt.Errorf("process objects batch: %w", err) + } + } +} + +func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error { + var getPrm engine.GetPrm + getPrm.WithAddress(addr) + res, err := storageEngine.Get(ctx, getPrm) + if err != nil { + return fmt.Errorf("get object %s from storage engine: %w", addr, err) + } + + if err := q.Put(ctx, res.Object()); err != nil { + return fmt.Errorf("put object %s to quarantine: %w", addr, err) + } + + var delPrm engine.DeletePrm + delPrm.WithForceRemoval() + delPrm.WithAddress(addr) + + if err = storageEngine.Delete(ctx, delPrm); err != nil { + return fmt.Errorf("delete object %s from storage engine: %w", addr, err) + } + return nil +} + +type processStatus struct { + guard sync.RWMutex + statusCount 
map[status]uint64 + count uint64 +} + +func (s *processStatus) add(st status) { + s.guard.Lock() + defer s.guard.Unlock() + s.statusCount[st]++ + s.count++ +} + +func (s *processStatus) total() uint64 { + s.guard.RLock() + defer s.guard.RUnlock() + return s.count +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go new file mode 100644 index 000000000..5be34d502 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -0,0 +1,201 @@ +package zombie + +import ( + "context" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" + shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" + fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "github.com/panjf2000/ants/v2" + "github.com/spf13/cobra" + "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine { + ngOpts := storageEngineOptions(c) + shardOpts := shardOptions(cmd, c) + e := engine.New(ngOpts...) + for _, opts := range shardOpts { + _, err := e.AddShard(cmd.Context(), opts...) 
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + } + commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context())) + commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context())) + return e +} + +func storageEngineOptions(c *config.Config) []engine.Option { + return []engine.Option{ + engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), + engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)), + } +} + +func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option { + var result [][]shard.Option + err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error { + result = append(result, getShardOpts(cmd, c, sh)) + return nil + }) + commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + return result +} + +func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option { + wc, wcEnabled := getWriteCacheOpts(sh) + return []shard.Option{ + shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + shard.WithRefillMetabase(sh.RefillMetabase()), + shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()), + shard.WithMode(sh.Mode()), + shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...), + shard.WithMetaBaseOptions(getMetabaseOpts(sh)...), + shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...), + shard.WithWriteCache(wcEnabled), + shard.WithWriteCacheOptions(wc), + shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()), + shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()), + shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()), + shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()), + shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { + pool, err := ants.NewPool(sz) + commonCmd.ExitOnErr(cmd, "init GC pool: %w", err) + return pool + }), + shard.WithLimiter(qos.NewNoopLimiter()), + } +} + +func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) { + if wc := sh.WriteCache(); wc != nil && wc.Enabled() { + var result []writecache.Option + result = append(result, + writecache.WithPath(wc.Path()), + writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()), + writecache.WithMaxObjectSize(wc.MaxObjectSize()), + writecache.WithFlushWorkersCount(wc.WorkerCount()), + writecache.WithMaxCacheSize(wc.SizeLimit()), + writecache.WithMaxCacheCount(wc.CountLimit()), + writecache.WithNoSync(wc.NoSync()), + writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + writecache.WithQoSLimiter(qos.NewNoopLimiter()), + ) + return result, true + } + return nil, false +} + +func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option { + var piloramaOpts []pilorama.Option + if config.BoolSafe(c.Sub("tree"), "enabled") { + pr := sh.Pilorama() + piloramaOpts = append(piloramaOpts, + pilorama.WithPath(pr.Path()), + pilorama.WithPerm(pr.Perm()), + pilorama.WithNoSync(pr.NoSync()), + pilorama.WithMaxBatchSize(pr.MaxBatchSize()), + pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()), + ) + } + return piloramaOpts +} + +func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { + return []meta.Option{ + meta.WithPath(sh.Metabase().Path()), + meta.WithPermissions(sh.Metabase().BoltDB().Perm()), + meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()), + meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()), + meta.WithBoltDBOptions(&bbolt.Options{ + Timeout: 100 * 
time.Millisecond, + }), + meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + meta.WithEpochState(&epochState{}), + } +} + +func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { + result := []blobstor.Option{ + blobstor.WithCompression(sh.Compression()), + blobstor.WithStorages(getSubStorages(ctx, sh)), + blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + return result +} + +func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage { + var ss []blobstor.SubStorage + for _, storage := range sh.BlobStor().Storages() { + switch storage.Type() { + case blobovniczatree.Type: + sub := blobovniczaconfig.From((*config.Config)(storage)) + blobTreeOpts := []blobovniczatree.Option{ + blobovniczatree.WithRootPath(storage.Path()), + blobovniczatree.WithPermissions(storage.Perm()), + blobovniczatree.WithBlobovniczaSize(sub.Size()), + blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()), + blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()), + blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()), + blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()), + blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), + blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), + blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), + blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), + Policy: func(_ *objectSDK.Object, data []byte) bool { + return uint64(len(data)) < sh.SmallSizeLimit() + }, + }) + case fstree.Type: + sub := fstreeconfig.From((*config.Config)(storage)) + fstreeOpts := []fstree.Option{ + fstree.WithPath(storage.Path()), + fstree.WithPerm(storage.Perm()), + fstree.WithDepth(sub.Depth()), + fstree.WithNoSync(sub.NoSync()), + fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: fstree.New(fstreeOpts...), + Policy: func(_ *objectSDK.Object, _ []byte) bool { + return true + }, + }) + default: + // should never happen, that has already + // been handled: when the config was read + } + } + return ss +} + +type epochState struct{} + +func (epochState) CurrentEpoch() uint64 { + return 0 +} diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 914682647..3c332c3f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -3,6 +3,8 @@ package ape import ( "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -76,7 +78,8 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName) + 
walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) + ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName}) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) var ch util.Uint160 diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index be42f2aa5..23dba14f4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) alphaRes, err := c.InvokeScript(w.Bytes(), nil) if err != nil { @@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan for i := range accounts { emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) res, err := c.Run(w.Bytes()) if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) { diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index 65ccc9f9f..c17fb62ff 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig, netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig: nbuf := make([]byte, 8) - copy(nbuf[:], v) + copy(nbuf, v) n := binary.LittleEndian.Uint64(nbuf) - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: if len(v) == 0 || len(v) > 1 { return helper.InvalidConfigValueErr(k) } - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1)) default: - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v)))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v))) } } diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index e72dc15e9..79685f111 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/io" @@ -235,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd putContainer(bw, ch, cnt) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go index 5adb480da..543b5fcb3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/cli/cmdargs" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error { } } - if writer.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } + assert.NoError(writer.Err, "can't create deployment script") if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { return err @@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) } - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } else if bw.Len() != start { + assert.NoError(bw.Err, "can't create deployment script") + if bw.Len() != start { writer.WriteBytes(bw.Bytes()) emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index 437e2480d..fde58fd2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -11,6 +11,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -219,8 +220,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) { if info.version == "" { info.version = "unknown" } - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n", - info.name, info.version, info.hash.StringLE()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n", + info.name, info.version, info.hash.StringLE())) } _ = tw.Flush() @@ -236,21 +237,17 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } else { sub.Reset() emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - if sub.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: 
%w", bw.Err)) - } + assert.NoError(sub.Err, "can't create version script") script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.BinWriter.WriteBytes(script) + bw.WriteBytes(script) emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) emit.Opcodes(bw.BinWriter, opcode.PUSH0) } } emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(bw.Err, "can't create version script") res, err := c.InvokeScript(bw.Bytes(), nil) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index b229d0436..7f777db98 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,6 +1,8 @@ package frostfsid import ( + "encoding/hex" + "errors" "fmt" "math/big" "sort" @@ -33,11 +35,16 @@ const ( subjectNameFlag = "subject-name" subjectKeyFlag = "subject-key" subjectAddressFlag = "subject-address" - includeNamesFlag = "include-names" + extendedFlag = "extended" groupNameFlag = "group-name" groupIDFlag = "group-id" rootNamespacePlaceholder = "" + + keyFlag = "key" + keyDescFlag = "Key for storing a value in the subject's KV storage" + valueFlag = "value" + valueDescFlag = "Value to be stored in the subject's KV storage" ) var ( @@ -151,6 +158,23 @@ var ( }, Run: frostfsidListGroupSubjects, } + + frostfsidSetKVCmd = &cobra.Command{ + Use: "set-kv", + Short: "Store a key-value pair in the subject's KV storage", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidSetKV, + } + frostfsidDeleteKVCmd = &cobra.Command{ + Use: "delete-kv", + Short: "Delete a value from the subject's KV storage", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidDeleteKV, + } ) func initFrostfsIDCreateNamespaceCmd() { @@ -186,7 +210,7 @@ func initFrostfsIDListSubjectsCmd() { Cmd.AddCommand(frostfsidListSubjectsCmd) frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects") - frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") + frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") } func initFrostfsIDCreateGroupCmd() { @@ -233,7 +257,22 @@ func initFrostfsIDListGroupSubjectsCmd() { frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name") frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") + frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") +} + +func initFrostfsIDSetKVCmd() { + Cmd.AddCommand(frostfsidSetKVCmd) + frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, 
commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") + frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag) + frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag) +} + +func initFrostfsIDDeleteKVCmd() { + Cmd.AddCommand(frostfsidDeleteKVCmd) + frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") + frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag) } func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { @@ -298,7 +337,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) { } func frostfsidListSubjects(cmd *cobra.Command, _ []string) { - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) ns := getFrostfsIDNamespace(cmd) inv, _, hash := initInvoker(cmd) reader := frostfsidrpclient.NewReader(inv, hash) @@ -311,21 +350,19 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) for _, addr := range subAddresses { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(addr)) continue } - sessionID, it, err := reader.ListSubjects() + items, err := reader.GetSubject(addr) commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) - items, err := readIterator(inv, &it, sessionID) - commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) - subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name) + printSubjectInfo(cmd, addr, subj) + cmd.Println() } } @@ -403,10 +440,49 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) } +func frostfsidSetKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + value, _ := cmd.Flags().GetString(valueFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "set KV: %w", err) +} + +func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "delete KV: %w", err) +} + func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) inv, cs, hash := initInvoker(cmd) _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) 
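// frostfsidSetKV and frostfsidDeleteKV above share the same empty-key guard.
// A hypothetical helper could factor it out; a minimal sketch, assuming the
// cobra and commonCmd packages already imported in this file:
//
//	func requireStringFlag(cmd *cobra.Command, name string) string {
//		v, _ := cmd.Flags().GetString(name)
//		if v == "" {
//			commonCmd.ExitOnErr(cmd, "", fmt.Errorf("%s can't be empty", name))
//		}
//		return v
//	}
//
//	key := requireStringFlag(cmd, keyFlag) // replaces the inline check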
@@ -424,7 +500,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) }) for _, subjAddr := range subjects { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(subjAddr)) continue } @@ -433,7 +509,8 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name) + printSubjectInfo(cmd, subjAddr, subj) + cmd.Println() } } @@ -523,3 +600,30 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui return inv, cs, nmHash } + +func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) { + cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) + pk := "" + if subj.PrimaryKey != nil { + pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) + } + cmd.Printf("Primary key: %s\n", pk) + cmd.Printf("Name: %s\n", subj.Name) + cmd.Printf("Namespace: %s\n", subj.Namespace) + if len(subj.AdditionalKeys) > 0 { + cmd.Printf("Additional keys:\n") + for _, key := range subj.AdditionalKeys { + k := "" + if key != nil { + k = hex.EncodeToString(key.Bytes()) + } + cmd.Printf("- %s\n", k) + } + } + if len(subj.KV) > 0 { + cmd.Printf("KV:\n") + for k, v := range subj.KV { + cmd.Printf("- %s: %s\n", k, v) + } + } +} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 6ffcaa487..8aad5c5c1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -12,6 +12,8 @@ func init() { initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() + initFrostfsIDSetKVCmd() + initFrostfsIDDeleteKVCmd() initFrostfsIDAddSubjectKeyCmd() initFrostfsIDRemoveSubjectKeyCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index eb0444408..6499ace5f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -3,9 +3,6 @@ package helper import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -16,7 +13,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -28,32 +24,86 @@ type LocalActor struct { rpcInvoker invoker.RPCInvoke } +type AlphabetWallets struct { + Label string + Path string +} + +func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) { + w, err := GetAlphabetWallets(v, a.Path) + if err != nil { + return nil, err + } + + var accounts []*wallet.Account + for _, wall := range w { + acc, err := GetWalletAccount(wall, a.Label) + if err != nil { + return nil, err + } + accounts = append(accounts, acc) + } + return accounts, nil +} + +type RegularWallets struct{ Path string } + 
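// RegularWallets points at a single NEP-6 wallet file. GetAccount below opens
// it with getRegularWallet (defined further down in helper/util.go, prompting
// for the wallet password) and returns the account bound to the wallet's
// change address. A usage sketch with a hypothetical path:
//
//	regular := &RegularWallets{Path: "/home/user/wallet.json"}
//	accs, err := regular.GetAccount() // one account, password read from stdin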
+func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
+	w, err := getRegularWallet(r.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
+}
+
 // NewLocalActor creates a LocalActor with accounts from the provided wallets.
 // If no wallets are provided, the created actor has only a dummy account and is suitable for read operations only.
 //
 // If wallets are provided, the contract client will use accounts with accName name from these wallets.
 // To determine which account name should be used in a contract client, refer to how the contract
 // verifies the transaction signature.
-func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) {
-	walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
+func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
 	var act *actor.Actor
 	var accounts []*wallet.Account
+	var signers []actor.SignerAccount
 
-	wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
-	commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
+	if alphabet != nil {
+		account, err := alphabet.GetAccount(viper.GetViper())
+		if err != nil {
+			return nil, err
+		}
-	for _, w := range wallets {
-		acc, err := GetWalletAccount(w, accName)
-		commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
-		accounts = append(accounts, acc)
+		accounts = append(accounts, account...)
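// Only the first account of each wallet group is registered as a signer; the
// transaction.Global scope mirrors the previously hard-coded signer, so the
// signature stays valid for whichever contracts the submitted scripts call.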
+ signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) + } + + act, err := actor.New(c, signers) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 961ceba53..50b5c1ec7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -13,9 +14,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" @@ -187,19 +186,9 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (* } func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - switch c.(type) { - case *rpcclient.Client: - inv := invoker.New(c, nil) - reader := nns2.NewReader(inv, nnsHash) - return reader.IsAvailable(name) - default: - b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) - if err != nil { - return false, fmt.Errorf("`isAvailable`: invalid response: %w", err) - } - - return b, nil - } + inv := invoker.New(c, nil) + reader := nns2.NewReader(inv, nnsHash) + return reader.IsAvailable(name) } func CheckNotaryEnabled(c Client) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index 8e5615baa..da5ffedae 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -13,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -21,6 +22,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -28,7 +30,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - 
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -375,9 +376,7 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen } act, err = actor.New(c.Client, signers) } else { - if withConsensus { - panic("BUG: should never happen") - } + assert.False(withConsensus, "BUG: should never happen") act, err = c.CommitteeAct, nil } if err != nil { @@ -411,11 +410,9 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { version, err := c.Client.GetVersion() - if err != nil { - // error appears only if client - // has not been initialized - panic(err) - } + // error appears only if client + // has not been initialized + assert.NoError(err) network := version.Protocol.Network // Use parameter context to avoid dealing with signature order. @@ -447,12 +444,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin for i := range tx.Signers { if tx.Signers[i].Account == h { + assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") if i < len(tx.Scripts) { tx.Scripts[i] = *w - } else if i == len(tx.Scripts) { + } + if i == len(tx.Scripts) { tx.Scripts = append(tx.Scripts, *w) - } else { - panic("BUG: invalid signing order") } return nil } @@ -510,9 +507,7 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) emit.Opcodes(bw.BinWriter, opcode.ASSERT) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) return bw.Bytes(), false, nil } @@ -524,12 +519,8 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U } func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone) - if err != nil { - return false, err - } - - return res.State == vmstate.Halt.String(), nil + avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) + return !avail, err } func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index d0a05d5c7..46611c177 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" "github.com/nspcc-dev/neo-go/pkg/core" @@ -316,9 +317,7 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint func (l *LocalClient) putTransactions() error { // 1. Prepare new block. lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - if err != nil { - panic(err) - } + assert.NoError(err) defer func() { l.transactions = l.transactions[:0] }() b := &block.Block{ @@ -359,9 +358,7 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s w := io.NewBufBinWriter() emit.Array(w.BinWriter, parameters...) 
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - if w.Err != nil { - panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) - } + assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) return c.InvokeScript(w.Bytes(), signers) } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go index fb8f03783..20abaff0a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go @@ -3,6 +3,7 @@ package helper import ( "errors" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -118,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error { return err } for k, v := range m { - for _, key := range NetmapConfigKeys { - if k == key { - md[k] = v - break - } + if slices.Contains(NetmapConfigKeys, k) { + md[k] = v } } return nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index c26aa447b..be6b2c6dd 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" + "github.com/nspcc-dev/neo-go/cli/input" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" @@ -22,6 +23,27 @@ import ( "github.com/spf13/viper" ) +func getRegularWallet(walletPath string) (*wallet.Wallet, error) { + w, err := wallet.NewWalletFromFile(walletPath) + if err != nil { + return nil, err + } + + password, err := input.ReadPassword("Enter password for wallet:") + if err != nil { + return nil, fmt.Errorf("can't fetch password: %w", err) + } + + for i := range w.Accounts { + if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { + err = fmt.Errorf("can't unlock wallet: %w", err) + break + } + } + + return w, err +} + func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { wallets, err := openAlphabetWallets(v, walletDir) if err != nil { @@ -51,7 +73,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er if errors.Is(err, os.ErrNotExist) { err = nil } else { - err = fmt.Errorf("can't open wallet: %w", err) + err = fmt.Errorf("can't open alphabet wallet: %w", err) } break } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go index e127ca545..176356378 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -111,9 +112,7 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - if w.Err != nil { - panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err)) - } + assert.NoError(w.Err, "can't wrap register script") } func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 4c6607f9a..7b7597d91 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -1,21 +1,18 @@ package initialize import ( - "errors" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -30,7 +27,8 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - regPrice, err := getCandidateRegisterPrice(c) + reader := neo.NewReader(c.ReadOnlyInvoker) + regPrice, err := reader.GetRegisterPrice() if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } @@ -42,9 +40,7 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error { emit.Opcodes(w.BinWriter, opcode.ASSERT) } emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - if w.Err != nil { - panic(fmt.Sprintf("BUG: %v", w.Err)) - } + assert.NoError(w.Err) signers := []actor.SignerAccount{{ Signer: c.GetSigner(false, c.CommitteeAcc), @@ -116,7 +112,7 @@ func registerCandidates(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { neoHash := neo.Hash - ok, err := transferNEOFinished(c, neoHash) + ok, err := transferNEOFinished(c) if ok || err != nil { return err } @@ -139,33 +135,8 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { return c.AwaitTx() } -func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) { - r := nep17.NewReader(c.ReadOnlyInvoker, neoHash) +func transferNEOFinished(c *helper.InitializeContext) (bool, error) { + r := neo.NewReader(c.ReadOnlyInvoker) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } - -var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") - -func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, 
error) { - switch c.Client.(type) { - case *rpcclient.Client: - inv := invoker.New(c.Client, nil) - reader := neo.NewReader(inv) - return reader.GetRegisterPrice() - default: - neoHash := neo.Hash - res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) - if err != nil { - return 0, err - } - if len(res.Stack) == 0 { - return 0, errGetPriceInvalid - } - bi, err := res.Stack[0].TryInteger() - if err != nil || !bi.IsInt64() { - return 0, errGetPriceInvalid - } - return bi.Int64(), nil - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index 7f1bfee2b..bb684b3a9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -22,15 +22,14 @@ import ( ) const ( - gasInitialTotalSupply = 30000000 * native.GASFactor // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. initialAlphabetGASAmount = 10_000 * native.GASFactor // initialProxyGASAmount represents the amount of GAS given to a proxy contract. initialProxyGASAmount = 50_000 * native.GASFactor ) -func initialCommitteeGASAmount(c *helper.InitializeContext) int64 { - return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 +func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { + return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 } func transferFunds(c *helper.InitializeContext) error { @@ -42,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error { return err } + version, err := c.Client.GetVersion() + if err != nil { + return err + } + var transfers []transferTarget for _, acc := range c.Accounts { to := acc.Contract.ScriptHash() @@ -59,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error { transferTarget{ Token: gas.Hash, Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: initialCommitteeGASAmount(c), + Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), }, transferTarget{ Token: neo.Hash, @@ -83,16 +87,23 @@ func transferFunds(c *helper.InitializeContext) error { // transferFundsFinished checks balances of accounts we transfer GAS to. // The stage is considered finished if the balance is greater than the half of what we need to transfer. 
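// For reference: initialCommitteeGASAmount above now takes the initial GAS
// supply from the connected chain instead of the removed gasInitialTotalSupply
// constant. With n alphabet wallets and D = InitialGasDistribution (30,000,000
// GAS on a default N3 setup, an assumption about the target network), the
// committee share is (D - n*initialAlphabetGASAmount) / 2, the same figure the
// old constant produced. Fetching D follows the same pattern as transferFunds:
//
//	version, err := c.Client.GetVersion()
//	if err != nil {
//		return false, err
//	}
//	d := int64(version.Protocol.InitialGasDistribution)
//	share := initialCommitteeGASAmount(c, d)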
func transferFundsFinished(c *helper.InitializeContext) (bool, error) { - acc := c.Accounts[0] - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - res, err := r.BalanceOf(acc.Contract.ScriptHash()) - if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 { + res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash()) + if err != nil { + return false, err + } + + version, err := c.Client.GetVersion() + if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { return false, err } res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) - return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err + if err != nil { + return false, err + } + + return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err } func transferGASToProxy(c *helper.InitializeContext) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index 1668bb327..14f6eb390 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -6,7 +6,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" + "github.com/spf13/viper" ) func initRegisterCmd() { @@ -19,6 +21,7 @@ func initRegisterCmd() { registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") + registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) } @@ -48,6 +51,7 @@ func initDeleteCmd() { deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) } @@ -62,3 +66,28 @@ func deleteDomain(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) cmd.Println("Domain deleted successfully") } + +func initSetAdminCmd() { + Cmd.AddCommand(setAdminCmd) + setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage) + _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath) + + _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag) +} + +func setAdmin(cmd *cobra.Command, _ []string) { + c, actor := nnsWriter(cmd) + + name, _ := 
cmd.Flags().GetString(nnsNameFlag) + w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath)) + commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err) + h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash()) + + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "Set admin error: %w", err) + cmd.Println("Set admin successfully") +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index b13cbc8a1..e49f62256 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -1,7 +1,11 @@ package nns import ( + "errors" + client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -16,7 +20,32 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName) + alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) + walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath)) + adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath)) + + var ( + alphabet *helper.AlphabetWallets + regularWallets []*helper.RegularWallets + ) + + if alphabetWalletPath != "" { + alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName} + } + + if walletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) + } + + if adminWalletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath}) + } + + if alphabet == nil && regularWallets == nil { + commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) + } + + ac, err := helper.NewLocalActor(c, alphabet, regularWallets...) 
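// NewLocalActor accepts a nil alphabet descriptor, so a purely user-signed
// setup is also valid here. A hypothetical call with only a regular wallet:
//
//	ac, err := helper.NewLocalActor(c, nil, &helper.RegularWallets{Path: walletPath})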
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) r := management.NewReader(ac.Invoker) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 09ed92ab3..9cb47356f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -19,6 +19,7 @@ func initAddRecordCmd() { addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) @@ -40,6 +41,7 @@ func initDelRecordsCmd() { delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) + delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) @@ -52,6 +54,7 @@ func initDelRecordCmd() { delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index 9bdeaccd9..bb84933c6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -39,6 +39,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: registerDomain, } @@ -48,6 +49,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: deleteDomain, } @@ -75,6 +77,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: addRecord, } @@ -92,6 +95,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, 
cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecords, } @@ -101,9 +105,21 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecord, } + setAdminCmd = &cobra.Command{ + Use: "set-admin", + Short: "Sets admin for domain", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) + _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath)) + }, + Run: setAdmin, + } ) func init() { @@ -116,4 +132,5 @@ func init() { initGetRecordsCmd() initDelRecordsCmd() initDelRecordCmd() + initSetAdminCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go index 686a244f0..f2932e87c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go @@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { buf := bytes.NewBuffer(nil) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee))) - _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte))) - _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice))) + _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee)) + _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte)) + _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice)) _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index defd898c8..cc8225c7a 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -5,9 +5,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" @@ -41,8 +41,8 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) - rootCmd.AddCommand(storagecfg.RootCmd) rootCmd.AddCommand(metabase.RootCmd) + rootCmd.AddCommand(maintenance.RootCmd) rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) diff --git 
a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go deleted file mode 100644 index 77183fb49..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go +++ /dev/null @@ -1,137 +0,0 @@ -package storagecfg - -const configTemplate = `logger: - level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" - -node: - wallet: - path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented - address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented - password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented - addresses: # list of addresses announced by Storage node in the Network map - - {{ .AnnouncedAddress }} - attribute_0: UN-LOCODE:{{ .Attribute.Locode }} - relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map - -grpc: - num: 1 # total number of listener endpoints - 0: - endpoint: {{ .Endpoint }} # endpoint for gRPC server - tls:{{if .TLSCert}} - enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2) - certificate: {{ .TLSCert }} # path to TLS certificate - key: {{ .TLSKey }} # path to TLS key - {{- else }} - enabled: false # disable TLS for a gRPC connection - {{- end}} - -control: - authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service - {{- range .AuthorizedKeys }} - - {{.}}{{end}} - grpc: - endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service - -morph: - dial_timeout: 20s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # use TTL cache for side chain GET operations - rpc_endpoint: # side chain N3 RPC endpoints - {{- range .MorphRPC }} - - address: wss://{{.}}/ws{{end}} -{{if not .Relay }} -storage: - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations - - shard: - default: # section with the default shard parameters - metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) - - blobstor: - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) - depth: 2 # max depth of object tree storage in FS - small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes - compress: true # turn on/off Zstandard compression (level 3) of stored objects - compression_exclude_content_types: - - audio/* - - video/* - - blobovnicza: - size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - depth: 1 # max depth of object tree storage in key-value DB - width: 4 # max width of object tree storage in key-value DB - opened_cache_capacity: 50 # maximum number of opened database files - opened_cache_ttl: 5m # ttl for opened database file - opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - gc: - remover_batch_size: 200 # number of objects to be removed by the garbage collector - remover_sleep_interval: 5m # frequency of the garbage collector invocation - 0: - mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only" - - metabase: - path: {{ .MetabasePath }} # path to the metabase - - blobstor: - path: {{ .BlobstorPath }} # path to the blobstor -{{end}}` - -const ( - neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221" - balanceMainnetAddress 
= "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55" - neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1" - balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf" -) - -var n3config = map[string]struct { - MorphRPC []string - RPC []string - NeoFSContract string - BalanceContract string -}{ - "testnet": { - MorphRPC: []string{ - "rpc01.morph.testnet.fs.neo.org:51331", - "rpc02.morph.testnet.fs.neo.org:51331", - "rpc03.morph.testnet.fs.neo.org:51331", - "rpc04.morph.testnet.fs.neo.org:51331", - "rpc05.morph.testnet.fs.neo.org:51331", - "rpc06.morph.testnet.fs.neo.org:51331", - "rpc07.morph.testnet.fs.neo.org:51331", - }, - RPC: []string{ - "rpc01.testnet.n3.nspcc.ru:21331", - "rpc02.testnet.n3.nspcc.ru:21331", - "rpc03.testnet.n3.nspcc.ru:21331", - "rpc04.testnet.n3.nspcc.ru:21331", - "rpc05.testnet.n3.nspcc.ru:21331", - "rpc06.testnet.n3.nspcc.ru:21331", - "rpc07.testnet.n3.nspcc.ru:21331", - }, - NeoFSContract: neofsTestnetAddress, - BalanceContract: balanceTestnetAddress, - }, - "mainnet": { - MorphRPC: []string{ - "rpc1.morph.fs.neo.org:40341", - "rpc2.morph.fs.neo.org:40341", - "rpc3.morph.fs.neo.org:40341", - "rpc4.morph.fs.neo.org:40341", - "rpc5.morph.fs.neo.org:40341", - "rpc6.morph.fs.neo.org:40341", - "rpc7.morph.fs.neo.org:40341", - }, - RPC: []string{ - "rpc1.n3.nspcc.ru:10331", - "rpc2.n3.nspcc.ru:10331", - "rpc3.n3.nspcc.ru:10331", - "rpc4.n3.nspcc.ru:10331", - "rpc5.n3.nspcc.ru:10331", - "rpc6.n3.nspcc.ru:10331", - "rpc7.n3.nspcc.ru:10331", - }, - NeoFSContract: neofsMainnetAddress, - BalanceContract: balanceMainnetAddress, - }, -} diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go deleted file mode 100644 index 8acbc4579..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ /dev/null @@ -1,433 +0,0 @@ -package storagecfg - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "fmt" - "math/rand" - "net" - "net/url" - "os" - "path/filepath" - "slices" - "strconv" - "strings" - "text/template" - "time" - - netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "github.com/chzyer/readline" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - - "github.com/spf13/cobra" -) - -const ( - walletFlag = "wallet" - accountFlag = "account" -) - -const ( - defaultControlEndpoint = "localhost:8090" - defaultDataEndpoint = "localhost" -) - -// RootCmd is a root command of config section. 
-var RootCmd = &cobra.Command{ - Use: "storage-config [-w wallet] [-a acccount] []", - Short: "Section for storage node configuration commands", - Run: storageConfig, -} - -func init() { - fs := RootCmd.Flags() - - fs.StringP(walletFlag, "w", "", "Path to wallet") - fs.StringP(accountFlag, "a", "", "Wallet account") -} - -type config struct { - AnnouncedAddress string - AuthorizedKeys []string - ControlEndpoint string - Endpoint string - TLSCert string - TLSKey string - MorphRPC []string - Attribute struct { - Locode string - } - Wallet struct { - Path string - Account string - Password string - } - Relay bool - BlobstorPath string - MetabasePath string -} - -func storageConfig(cmd *cobra.Command, args []string) { - outPath := getOutputPath(args) - - historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history") - readline.SetHistoryPath(historyPath) - - var c config - - c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag) - if c.Wallet.Path == "" { - c.Wallet.Path = getPath("Path to the storage node wallet: ") - } - - w, err := wallet.NewWalletFromFile(c.Wallet.Path) - fatalOnErr(err) - - fillWalletAccount(cmd, &c, w) - - accH, err := flags.ParseAddress(c.Wallet.Account) - fatalOnErr(err) - - acc := w.GetAccount(accH) - if acc == nil { - fatalOnErr(errors.New("can't find account in wallet")) - } - - c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account)) - fatalOnErr(err) - - err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) - fatalOnErr(err) - - c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes())) - - network := readNetwork(cmd) - - c.MorphRPC = n3config[network].MorphRPC - - depositGas(cmd, acc, network) - - c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ") - - endpoint := getDefaultEndpoint(cmd, &c) - c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint)) - if c.Endpoint == "" { - c.Endpoint = endpoint - } - - c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint)) - if c.ControlEndpoint == "" { - c.ControlEndpoint = defaultControlEndpoint - } - - c.TLSCert = getPath("TLS Certificate (optional): ") - if c.TLSCert != "" { - c.TLSKey = getPath("TLS Key: ") - } - - c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ") - if !c.Relay { - p := getPath("Path to the storage directory (all available storage will be used): ") - c.BlobstorPath = filepath.Join(p, "blob") - c.MetabasePath = filepath.Join(p, "meta") - } - - out := applyTemplate(c) - fatalOnErr(os.WriteFile(outPath, out, 0o644)) - - cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`") -} - -func getDefaultEndpoint(cmd *cobra.Command, c *config) string { - var addr, port string - for { - c.AnnouncedAddress = getString("Publicly announced address: ") - validator := netutil.Address{} - err := validator.FromString(c.AnnouncedAddress) - if err != nil { - cmd.Println("Incorrect address format. 
See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.") - continue - } - uriAddr, err := url.Parse(validator.URIAddr()) - if err != nil { - panic(fmt.Errorf("unexpected error: %w", err)) - } - addr = uriAddr.Hostname() - port = uriAddr.Port() - ip, err := net.ResolveIPAddr("ip", addr) - if err != nil { - cmd.Printf("Can't resolve IP address %s: %v\n", addr, err) - continue - } - - if !ip.IP.IsGlobalUnicast() { - cmd.Println("IP must be global unicast.") - continue - } - cmd.Printf("Resolved IP address: %s\n", ip.String()) - - _, err = strconv.ParseUint(port, 10, 16) - if err != nil { - cmd.Println("Port must be an integer.") - continue - } - - break - } - return net.JoinHostPort(defaultDataEndpoint, port) -} - -func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) { - c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag) - if c.Wallet.Account == "" { - addr := address.Uint160ToString(w.GetChangeAddress()) - c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr)) - if c.Wallet.Account == "" { - c.Wallet.Account = addr - } - } -} - -func readNetwork(cmd *cobra.Command) string { - var network string - for { - network = getString("Choose network [mainnet]/testnet: ") - switch network { - case "": - network = "mainnet" - case "testnet", "mainnet": - default: - cmd.Println(`Network must be either "mainnet" or "testnet"`) - continue - } - break - } - return network -} - -func getOutputPath(args []string) string { - if len(args) != 0 { - return args[0] - } - outPath := getPath("File to write config at [./config.yml]: ") - if outPath == "" { - outPath = "./config.yml" - } - return outPath -} - -func getWalletAccount(w *wallet.Wallet, prompt string) string { - addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts)) - for i := range w.Accounts { - addrs[i] = readline.PcItem(w.Accounts[i].Address) - } - - readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...)) - defer readline.SetAutoComplete(nil) - - s, err := readline.Line(prompt) - fatalOnErr(err) - return strings.TrimSpace(s) // autocompleter can return a string with a trailing space -} - -func getString(prompt string) string { - s, err := readline.Line(prompt) - fatalOnErr(err) - if s != "" { - _ = readline.AddHistory(s) - } - return s -} - -type filenameCompleter struct{} - -func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { - prefix := string(line[:pos]) - dir := filepath.Dir(prefix) - de, err := os.ReadDir(dir) - if err != nil { - return nil, 0 - } - - for i := range de { - name := filepath.Join(dir, de[i].Name()) - if strings.HasPrefix(name, prefix) { - tail := []rune(strings.TrimPrefix(name, prefix)) - if de[i].IsDir() { - tail = append(tail, filepath.Separator) - } - newLine = append(newLine, tail) - } - } - if pos != 0 { - return newLine, pos - len([]rune(dir)) - } - return newLine, 0 -} - -func getPath(prompt string) string { - readline.SetAutoComplete(filenameCompleter{}) - defer readline.SetAutoComplete(nil) - - p, err := readline.Line(prompt) - fatalOnErr(err) - - if p == "" { - return p - } - - _ = readline.AddHistory(p) - - abs, err := filepath.Abs(p) - if err != nil { - fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err)) - } - - return abs -} - -func getConfirmation(def bool, prompt string) bool { - for { - s, err := readline.Line(prompt) - fatalOnErr(err) - - switch strings.ToLower(s) { - case "y", "yes": - return true - case "n", "no": - return false - default: - if 
len(s) == 0 { - return def - } - } - } -} - -func applyTemplate(c config) []byte { - tmpl, err := template.New("config").Parse(configTemplate) - fatalOnErr(err) - - b := bytes.NewBuffer(nil) - fatalOnErr(tmpl.Execute(b, c)) - - return b.Bytes() -} - -func fatalOnErr(err error) { - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) { - sideClient := initClient(n3config[network].MorphRPC) - balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract) - - sideActor, err := actor.NewSimple(sideClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err)) - } - - sideGas := nep17.NewReader(sideActor, balanceHash) - accSH := acc.Contract.ScriptHash() - - balance, err := sideGas.BalanceOf(accSH) - if err != nil { - fatalOnErr(fmt.Errorf("side chain balance: %w", err)) - } - - ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ", - fixedn.ToString(balance, 12))) - if !ok { - return - } - - amountStr := getString("Enter amount in GAS: ") - amount, err := fixedn.FromString(amountStr, 8) - if err != nil { - fatalOnErr(fmt.Errorf("invalid amount: %w", err)) - } - - mainClient := initClient(n3config[network].RPC) - neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract) - - mainActor, err := actor.NewSimple(mainClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err)) - } - - mainGas := nep17.New(mainActor, gas.Hash) - - txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil) - if err != nil { - fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err)) - } - - cmd.Print("Waiting for transactions to persist.") - tick := time.NewTicker(time.Second / 2) - defer tick.Stop() - - timer := time.NewTimer(time.Second * 20) - defer timer.Stop() - - at := trigger.Application - -loop: - for { - select { - case <-tick.C: - _, err := mainClient.GetApplicationLog(txHash, &at) - if err == nil { - cmd.Print("\n") - break loop - } - cmd.Print(".") - case <-timer.C: - cmd.Printf("\nTimeout while waiting for transaction to persist.\n") - if getConfirmation(false, "Continue configuration? yes/[no]: ") { - return - } - os.Exit(1) - } - } -} - -func initClient(rpc []string) *rpcclient.Client { - var c *rpcclient.Client - var err error - - shuffled := slices.Clone(rpc) - rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) - - for _, endpoint := range shuffled { - c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{ - DialTimeout: time.Second * 2, - RequestTimeout: time.Second * 5, - }) - if err != nil { - continue - } - if err = c.Init(); err != nil { - continue - } - return c - } - - fatalOnErr(fmt.Errorf("can't create N3 client: %w", err)) - panic("unreachable") -} diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index ceae36ae7..299d0a830 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -9,7 +9,6 @@ import ( "io" "os" "slices" - "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -77,9 +76,7 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain // SortedIDList returns sorted list of identifiers of user's containers. 
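// The strings.Compare closure below is replaced by the method expression
// cid.ID.Cmp, which already has the comparator signature func(cid.ID, cid.ID) int
// that slices.SortFunc expects. A sketch of the pattern, assuming a and b are
// valid container IDs:
//
//	ids := []cid.ID{b, a}
//	slices.SortFunc(ids, cid.ID.Cmp) // ascending by ID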
func (x ListContainersRes) SortedIDList() []cid.ID { list := x.cliRes.Containers() - slices.SortFunc(list, func(lhs, rhs cid.ID) int { - return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString()) - }) + slices.SortFunc(list, cid.ID.Cmp) return list } @@ -687,9 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes return nil, fmt.Errorf("read object list: %w", err) } - slices.SortFunc(list, func(a, b oid.ID) int { - return strings.Compare(a.EncodeToString(), b.EncodeToString()) - }) + slices.SortFunc(list, oid.ID.Cmp) return &SearchObjectsRes{ ids: list, @@ -863,6 +858,8 @@ type PatchObjectPrm struct { ReplaceAttribute bool + NewSplitHeader *objectSDK.SplitHeader + PayloadPatches []PayloadPatch } @@ -893,7 +890,11 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { return nil, fmt.Errorf("init payload reading: %w", err) } - if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) { + if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ + NewSplitHeader: prm.NewSplitHeader, + NewAttributes: prm.NewAttributes, + ReplaceAttributes: prm.ReplaceAttribute, + }) { for _, pp := range prm.PayloadPatches { payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) if err != nil { diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 2d9c45cbd..1eadfa2e1 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go index 88321176f..6ed21e107 100644 --- a/cmd/frostfs-cli/internal/commonflags/api.go +++ b/cmd/frostfs-cli/internal/commonflags/api.go @@ -9,7 +9,7 @@ const ( TTL = "ttl" TTLShorthand = "" TTLDefault = 2 - TTLUsage = "TTL value in request meta header" + TTLUsage = "The maximum number of intermediate nodes in the request route" XHeadersKey = "xhdr" XHeadersShorthand = "x" diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go index cd46d63eb..fad1f6183 100644 --- a/cmd/frostfs-cli/internal/commonflags/flags.go +++ b/cmd/frostfs-cli/internal/commonflags/flags.go @@ -28,7 +28,7 @@ const ( RPC = "rpc-endpoint" RPCShorthand = "r" RPCDefault = "" - RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')" + RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')" Timeout = "timeout" TimeoutShorthand = "t" diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go index a86506c37..0927788ba 100644 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ b/cmd/frostfs-cli/modules/bearer/create.go @@ -44,6 +44,7 @@ is set to current epoch + n.
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) + _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) }, } @@ -81,7 +82,7 @@ func createToken(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) if iatRelative || expRelative || nvbRelative { - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) if len(endpoint) == 0 { commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) } diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go index 8c4ab14f8..fac6eb2cd 100644 --- a/cmd/frostfs-cli/modules/container/get.go +++ b/cmd/frostfs-cli/modules/container/get.go @@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod cmd.Println("created:", container.CreatedAt(cnr)) cmd.Println("attributes:") - cnr.IterateAttributes(func(key, val string) { + for key, val := range cnr.Attributes() { cmd.Printf("\t%s=%s\n", key, val) - }) + } cmd.Println("placement policy:") commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index bbb8da840..e4a023d91 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -102,9 +102,9 @@ func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, i cmd.Println(id.String()) if flagVarListPrintAttr { - cnr.IterateUserAttributes(func(key, val string) { + for key, val := range cnr.Attributes() { cmd.Printf(" %s: %s\n", key, val) - }) + } } } diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index dcd755510..cf4862b4a 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -5,7 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "maps" "os" + "slices" "strings" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -19,8 +21,9 @@ import ( ) type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo + cmd *cobra.Command + nodes map[string]netmap.NodeInfo + console *readline.Instance } func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { @@ -37,10 +40,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { i := 1 for id, node := range repl.nodes { var attrs []string - node.IterateAttributes(func(k, v string) { + for k, v := range node.Attributes() { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - }) - fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) + } + fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } return nil @@ -147,12 +150,29 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { for _, node := range ns { ids = append(ids, hex.EncodeToString(node.PublicKey())) } - fmt.Printf("\t%2d: %v\n", i+1, ids) + fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) } return nil } +func (repl *policyPlaygroundREPL) handleHelp(args []string) error { + if len(args) != 0 { + if _, ok := commands[args[0]]; !ok { + return fmt.Errorf("unknown command: %q", args[0]) + } + 
fmt.Fprintln(repl.console, commands[args[0]].usage) + return nil + } + + commandList := slices.Collect(maps.Keys(commands)) + slices.Sort(commandList) + for _, command := range commandList { + fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].description) + } + return nil +} + func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { var nm netmap.NetMap var nodes []netmap.NodeInfo @@ -163,15 +183,104 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { return nm } -var policyPlaygroundCompleter = readline.NewPrefixCompleter( - readline.PcItem("list"), - readline.PcItem("ls"), - readline.PcItem("add"), - readline.PcItem("load"), - readline.PcItem("remove"), - readline.PcItem("rm"), - readline.PcItem("eval"), -) +type commandDescription struct { + description string + usage string +} + +var commands = map[string]commandDescription{ + "list": { + description: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap
Example of usage:
 list
 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
`, + }, + + "ls": { + description: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap
Example of usage:
 ls
 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
`, + }, + + "add": { + description: "Add a new node: add <id> attr=value", + usage: `Add a new node
Example of usage:
 add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, + }, + + "load": { + description: "Load netmap from file: load <path>", + usage: `Load netmap from file
Example of usage:
 load "netmap.json"
File format (netmap.json):
{
 "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
 "continent": "Europe",
 "country": "Poland"
 },
 "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
 "continent": "Antarctica",
 "country": "Heard Island"
 }
}`, + }, + + "remove": { + description: "Remove a node: remove <id>", + usage: `Remove a node
Example of usage:
 remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "rm": { + description: "Remove a node: rm <id>", + usage: `Remove a node
Example of usage:
 rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "eval": { + description: "Evaluate a policy: eval <policy>", + usage: `Evaluate a policy
Example of usage:
 eval REP 2`, + }, + + "help": { + description: "Show available commands", + }, +} + +func (repl *policyPlaygroundREPL) handleCommand(args []string) error { + if len(args) == 0 { + return nil + } + + switch args[0] { + case "list", "ls": + return repl.handleLs(args[1:]) + case "add": + return repl.handleAdd(args[1:]) + case "load": + return repl.handleLoad(args[1:]) + case "remove", "rm": + return repl.handleRemove(args[1:]) + case "eval": + return repl.handleEval(args[1:]) + case "help": + return repl.handleHelp(args[1:]) + } + return fmt.Errorf("unknown command %q. 
See 'help' for assistance", args[0]) +} func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { @@ -190,24 +299,32 @@ func (repl *policyPlaygroundREPL) run() error { } } - cmdHandlers := map[string]func([]string) error{ - "list": repl.handleLs, - "ls": repl.handleLs, - "add": repl.handleAdd, - "load": repl.handleLoad, - "remove": repl.handleRemove, - "rm": repl.handleRemove, - "eval": repl.handleEval, + if len(viper.GetString(netmapConfigPath)) > 0 { + err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) + commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) } + var cfgCompleter []readline.PrefixCompleterInterface + var helpSubItems []readline.PrefixCompleterInterface + + for name := range commands { + if name != "help" { + cfgCompleter = append(cfgCompleter, readline.PcItem(name)) + helpSubItems = append(helpSubItems, readline.PcItem(name)) + } + } + + cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) + completer := readline.NewPrefixCompleter(cfgCompleter...) rl, err := readline.NewEx(&readline.Config{ Prompt: "> ", InterruptPrompt: "^C", - AutoComplete: policyPlaygroundCompleter, + AutoComplete: completer, }) if err != nil { return fmt.Errorf("error initializing readline: %w", err) } + repl.console = rl defer rl.Close() var exit bool @@ -225,17 +342,8 @@ func (repl *policyPlaygroundREPL) run() error { } exit = false - parts := strings.Fields(line) - if len(parts) == 0 { - continue - } - cmd := parts[0] - if handler, exists := cmdHandlers[cmd]; exists { - if err := handler(parts[1:]); err != nil { - fmt.Printf("error: %v\n", err) - } - } else { - fmt.Printf("error: unknown command %q\n", cmd) + if err := repl.handleCommand(strings.Fields(line)); err != nil { + fmt.Fprintf(repl.console, "error: %v\n", err) } } } @@ -251,6 +359,14 @@ If a wallet and endpoint is provided, the initial netmap data will be loaded fro }, } +const ( + netmapConfigPath = "netmap-config" + netmapConfigUsage = "Path to the netmap configuration file" +) + func initContainerPolicyPlaygroundCmd() { commonflags.Init(policyPlaygroundCmd) + policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) + + _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) } diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 8032bf09a..b8d7eb046 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftMinutes := int(leftSeconds / 60) - sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes)) + fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) } func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR hour := int(duration.Seconds() / 3600) minute := int(duration.Seconds()/60) % 60 second := int(duration.Seconds()) % 60 - sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second)) + fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) } } func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if resp.GetBody().GetStartedAt() != nil { startedAt := 
time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339))) + fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) } } func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if len(resp.GetBody().GetErrorMessage()) > 0 { - sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage())) + fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) } } @@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes default: status = "undefined" } - sb.WriteString(fmt.Sprintf(" Status: %s.", status)) + fmt.Fprintf(sb, " Status: %s.", status) } func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR } func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", + fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetTotalObjects(), resp.GetBody().GetFailedObjects(), resp.GetBody().GetSkippedObjects(), resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetTotalTrees(), - resp.GetBody().GetFailedTrees())) + resp.GetBody().GetFailedTrees()) } func initControlEvacuationShardCmd() { diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 8bd2dc9cd..3142d02e7 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) { tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) _, _ = tw.Write([]byte("#\tName\tType\n")) for i, t := range targets { - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) } _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go new file mode 100644 index 000000000..4cb4be539 --- /dev/null +++ b/cmd/frostfs-cli/modules/control/locate.go @@ -0,0 +1,117 @@ +package control + +import ( + "bytes" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/mr-tron/base58" + "github.com/spf13/cobra" +) + +const ( + FullInfoFlag = "full" + FullInfoFlagUsage = "Print full ShardInfo." 
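+// A hedged usage sketch for the new subcommand (the exact flag names are the
+// ones registered in initControlLocateObjectCmd below; the endpoint and key
+// flags come from the shared control-command setup):
+//
+//	frostfs-cli control locate-object --cid <cid> --oid <oid>
+//	frostfs-cli control locate-object --cid <cid> --oid <oid> --full --json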
+) + +var locateObjectCmd = &cobra.Command{ + Use: "locate-object", + Short: "List shards storing the object", + Long: "List shards storing the object", + Run: locateObject, +} + +func initControlLocateObjectCmd() { + initControlFlags(locateObjectCmd) + + flags := locateObjectCmd.Flags() + + flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag) + + flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag) + + flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.") + flags.Bool(FullInfoFlag, false, FullInfoFlagUsage) +} + +func locateObject(cmd *cobra.Command, _ []string) { + var cnr cid.ID + var obj oid.ID + + _ = object.ReadObjectAddress(cmd, &cnr, &obj) + + pk := key.Get(cmd) + + body := new(control.ListShardsForObjectRequest_Body) + body.SetContainerId(cnr.EncodeToString()) + body.SetObjectId(obj.EncodeToString()) + req := new(control.ListShardsForObjectRequest) + req.SetBody(body) + signRequest(cmd, pk, req) + + cli := getClient(cmd, pk) + + var err error + var resp *control.ListShardsForObjectResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + resp, err = control.ListShardsForObject(client, req) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + shardIDs := resp.GetBody().GetShard_ID() + + isFull, _ := cmd.Flags().GetBool(FullInfoFlag) + if !isFull { + for _, id := range shardIDs { + cmd.Println(base58.Encode(id)) + } + return + } + + // get full shard info + listShardsReq := new(control.ListShardsRequest) + listShardsReq.SetBody(new(control.ListShardsRequest_Body)) + signRequest(cmd, pk, listShardsReq) + var listShardsResp *control.ListShardsResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + listShardsResp, err = control.ListShards(client, listShardsReq) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody()) + + shards := listShardsResp.GetBody().GetShards() + sortShardsByID(shards) + shards = filterShards(shards, shardIDs) + + isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) + if isJSON { + prettyPrintShardsJSON(cmd, shards) + } else { + prettyPrintShards(cmd, shards) + } +} + +func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo { + var res []control.ShardInfo + for _, id := range ids { + for _, inf := range info { + if bytes.Equal(inf.Shard_ID, id) { + res = append(res, inf) + } + } + } + return res +} diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go index b20d3618e..3abfe80cb 100644 --- a/cmd/frostfs-cli/modules/control/root.go +++ b/cmd/frostfs-cli/modules/control/root.go @@ -39,6 +39,7 @@ func init() { listRulesCmd, getRuleCmd, listTargetsCmd, + locateObjectCmd, ) initControlHealthCheckCmd() @@ -52,4 +53,5 @@ func init() { initControlListRulesCmd() initControGetRuleCmd() initControlListTargetsCmd() + initControlLocateObjectCmd() } diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go index 80e4a0c87..d0c9a641b 100644 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ b/cmd/frostfs-cli/modules/control/writecache.go @@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{ var sealWritecacheShardCmd = &cobra.Command{ Use: "seal", Short: "Flush objects from write-cache and 
move write-cache to degraded read only mode.", - Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.", + Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", Run: sealWritecache, } diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index ae4bb329a..5da66dcd9 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -62,11 +62,11 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("state:", stateWord) - netmap.IterateNetworkEndpoints(i, func(s string) { + for s := range i.NetworkEndpoints() { cmd.Println("address:", s) - }) + } - i.IterateAttributes(func(key, value string) { + for key, value := range i.Attributes() { cmd.Printf("attribute: %s=%s\n", key, value) - }) + } } diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go index e4e9cddb8..08a9ac4c8 100644 --- a/cmd/frostfs-cli/modules/object/delete.go +++ b/cmd/frostfs-cli/modules/object/delete.go @@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) } - objAddr = readObjectAddress(cmd, &cnr, &obj) + objAddr = ReadObjectAddress(cmd, &cnr, &obj) } pk := key.GetOrGenerate(cmd) diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go index f1edccba2..7312f5384 100644 --- a/cmd/frostfs-cli/modules/object/get.go +++ b/cmd/frostfs-cli/modules/object/get.go @@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) filename := cmd.Flag(fileFlag).Value.String() out, closer := createOutWriter(cmd, filename) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index d8ea449eb..25df375d4 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index 70c273443..97e996cad 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index 53dd01868..d67db9f0d 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -18,6 +18,7 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "github.com/spf13/viper" ) // object lock command. 
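// The hunk below applies the same pattern as bearer/create.go above: the RPC
// endpoint is read through viper rather than straight from the flag set, so a
// value bound from a config file or environment variable is honored as well.
// A hedged sketch of the binding that makes this work:
//
//	_ = viper.BindPFlag(commonflags.RPC, cmd.Flags().Lookup(commonflags.RPC))
//	endpoint := viper.GetString(commonflags.RPC) // explicit flag wins, then env/config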
@@ -78,7 +79,7 @@ var objectLockCmd = &cobra.Command{ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 1500830a2..476238651 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "sync" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -48,6 +49,12 @@ type ecHeader struct { parent oid.ID } +type objectCounter struct { + sync.Mutex + total uint32 + isECcounted bool +} + type objectPlacement struct { requiredNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo @@ -56,6 +63,7 @@ type objectPlacement struct { type objectNodesResult struct { errors []error placements map[oid.ID]objectPlacement + total uint32 } type ObjNodesDataObject struct { @@ -101,23 +109,23 @@ func initObjectNodesCmd() { func objectNodes(cmd *cobra.Command, _ []string) { var cnrID cid.ID var objID oid.ID - readObjectAddress(cmd, &cnrID, &objID) + ReadObjectAddress(cmd, &cnrID, &objID) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - objects := getPhyObjects(cmd, cnrID, objID, cli, pk) + objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - getActualPlacement(cmd, netmap, pk, objects, result) + getActualPlacement(cmd, netmap, pk, objects, count, result) printPlacement(cmd, objID, objects, result) } -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject { +func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -145,7 +153,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C parent: res.Header().ECHeader().Parent(), } } - return []phyObject{obj} + return []phyObject{obj}, 1 } var errSplitInfo *objectSDK.SplitInfoError @@ -155,29 +163,34 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError) + return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 } commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil + return nil, 0 } -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject { - members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead) +func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { + members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) + return 
flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total } -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID { +func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { + var total int splitInfo := errSplitInfo.SplitInfo() if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { - return members + if total = len(members); total > 0 { + total-- // linking object is not data object + } + return members, total } if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return members + return members, len(members) } - return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + return members, len(members) } func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { @@ -383,8 +396,11 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem } } -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) { +func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { resultMtx := &sync.Mutex{} + counter := &objectCounter{ + total: uint32(count), + } candidates := getNodesToCheckObjectExistance(cmd, netmap, result) @@ -401,7 +417,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. for _, object := range objects { eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk) + stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) resultMtx.Lock() defer resultMtx.Unlock() if err == nil && stored { @@ -420,6 +436,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. } commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) + result.total = counter.total } func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { @@ -444,17 +461,11 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N var cli *client.Client var addresses []string if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { - candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) addresses = append(addresses, candidate.ExternalAddresses()...) } else { addresses = append(addresses, candidate.ExternalAddresses()...) 
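// slices.AppendSeq (Go 1.23) consumes the iter.Seq returned by
// NetworkEndpoints() directly, replacing the callback-style iteration in the
// removed lines below. A hedged equivalence sketch:
//
//	// before:
//	candidate.IterateNetworkEndpoints(func(s string) bool { addresses = append(addresses, s); return false })
//	// after:
//	addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())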
- candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) } var lastErr error @@ -478,7 +489,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N return cli, nil } -func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) { +func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -493,6 +504,14 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, res, err := internalclient.HeadObject(ctx, prmHead) if err == nil && res != nil { + if res.Header().ECHeader() != nil { + counter.Lock() + defer counter.Unlock() + if !counter.isECcounted { + counter.total *= res.Header().ECHeader().Total() + } + counter.isECcounted = true + } return true, nil } var notFound *apistatus.ObjectNotFound @@ -512,7 +531,8 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul } func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects)) + fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) + fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects)) for _, object := range objects { fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go index ebc415b2f..ebbde76a2 100644 --- a/cmd/frostfs-cli/modules/object/patch.go +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -2,6 +2,7 @@ package object import ( "fmt" + "os" "strconv" "strings" @@ -9,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -20,6 +22,7 @@ const ( replaceAttrsFlagName = "replace-attrs" rangeFlagName = "range" payloadFlagName = "payload" + splitHeaderFlagName = "split-header" ) var objectPatchCmd = &cobra.Command{ @@ -50,13 +53,14 @@ func initObjectPatchCmd() { flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. 
Format: offset:length") flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") + flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header") } func patch(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeSlice(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -84,6 +88,8 @@ func patch(cmd *cobra.Command, _ []string) { prm.NewAttributes = newAttrs prm.ReplaceAttribute = replaceAttrs + prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd) + for i := range ranges { prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ Range: ranges[i], @@ -147,3 +153,22 @@ func patchPayloadPaths(cmd *cobra.Command) []string { v, _ := cmd.Flags().GetStringSlice(payloadFlagName) return v } + +func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader { + path, _ := cmd.Flags().GetString(splitHeaderFlagName) + if path == "" { + return nil + } + + data, err := os.ReadFile(path) + commonCmd.ExitOnErr(cmd, "read file error: %w", err) + + splitHdrV2 := new(objectV2.SplitHeader) + err = splitHdrV2.Unmarshal(data) + if err != nil { + err = splitHdrV2.UnmarshalJSON(data) + commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err) + } + + return objectSDK.NewSplitHeaderFromV2(splitHdrV2) +} diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go index 8f59906ca..6ec508ae2 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool { if ok { toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) toProto, _ := cmd.Flags().GetBool("proto") - if !(toJSON || toProto) { + if !toJSON && !toProto { cmd.PrintErrln("Object is erasure-encoded, ec information received.") } printECInfo(cmd, errECInfo.ECInfo()) diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go index b090c9f8c..8e4e8b287 100644 --- a/cmd/frostfs-cli/modules/object/util.go +++ b/cmd/frostfs-cli/modules/object/util.go @@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string { return xs } -func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { +func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { readCID(cmd, cnr) readOID(cmd, obj) @@ -262,13 +262,8 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client if _, ok := dst.(*internal.DeleteObjectPrm); ok { common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - rels := collectObjectRelatives(cmd, cli, cnr, *obj) - - if len(rels) == 0 { - objs = []oid.ID{*obj} - } else { - objs = append(rels, *obj) - } + objs = collectObjectRelatives(cmd, cli, cnr, *obj) + objs = append(objs, *obj) } } diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index a70624ac8..d71a94b98 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -2,18 +2,19 @@ package tree import ( "context" + "crypto/tls" "fmt" - "strings" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -32,23 +33,29 @@ func _client() (tree.TreeServiceClient, error) { return nil, err } + host, isTLS, err := client.ParseURI(netAddr.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), + grpc.WithTransportCredentials(creds), } - if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) + cc, err := grpc.NewClient(host, opts...) return tree.NewTreeServiceClient(cc), err } diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 09af08525..13a747ba6 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -4,11 +4,14 @@ import ( "context" "os" "os/signal" + "strconv" "syscall" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "github.com/spf13/cast" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -38,13 +41,33 @@ func reloadConfig() error { } cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) audit.Store(cfg.GetBool("audit.enabled")) + var logPrm logger.Prm err = logPrm.SetLevelString(cfg.GetString("logger.level")) if err != nil { return err } - logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + err = logPrm.SetTags(loggerTags()) + if err != nil { + return err + } + logger.UpdateLevelForTags(logPrm) - return logPrm.Reload() + return nil +} + +func loggerTags() [][]string { + var res [][]string + for i := 0; ; i++ { + var item []string + index := strconv.FormatInt(int64(i), 10) + names := cast.ToString(cfg.Get("logger.tags." 
+ index + ".names")) + if names == "" { + break + } + item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level"))) + res = append(res, item) + } + return res } func watchForSignal(ctx context.Context, cancel func()) { diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index ade64ba84..799feb784 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -31,7 +31,6 @@ const ( var ( wg = new(sync.WaitGroup) intErr = make(chan error) // internal inner ring errors - logPrm = new(logger.Prm) innerRing *innerring.Server pprofCmp *pprofComponent metricsCmp *httpComponent @@ -70,6 +69,7 @@ func main() { metrics := irMetrics.NewInnerRingMetrics() + var logPrm logger.Prm err = logPrm.SetLevelString( cfg.GetString("logger.level"), ) @@ -80,10 +80,14 @@ func main() { exitErr(err) logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + err = logPrm.SetTags(loggerTags()) + exitErr(err) log, err = logger.NewLogger(logPrm) exitErr(err) + logger.UpdateLevelForTags(logPrm) + ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go index 5a41f945c..7b0e25f3d 100644 --- a/cmd/frostfs-lens/internal/meta/tui.go +++ b/cmd/frostfs-lens/internal/meta/tui.go @@ -2,13 +2,17 @@ package meta import ( "context" + "encoding/binary" + "errors" "fmt" common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" + schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "github.com/rivo/tview" "github.com/spf13/cobra" + "go.etcd.io/bbolt" ) var tuiCMD = &cobra.Command{ @@ -27,6 +31,11 @@ Available search filters: var initialPrompt string +var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{ + 2: schema.MetabaseParserV2, + 3: schema.MetabaseParserV3, +} + func init() { common.AddComponentPathFlag(tuiCMD, &vPath) @@ -49,12 +58,22 @@ func runTUI(cmd *cobra.Command) error { } defer db.Close() + schemaVersion, hasVersion := lookupSchemaVersion(cmd, db) + if !hasVersion { + return errors.New("couldn't detect schema version") + } + + metabaseParser, ok := parserPerSchemaVersion[schemaVersion] + if !ok { + return fmt.Errorf("unknown schema version %d", schemaVersion) + } + // Need if app was stopped with Ctrl-C. 
ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil) + ui := tui.NewUI(ctx, app, db, metabaseParser, nil) _ = ui.AddFilter("cid", tui.CIDParser, "CID") _ = ui.AddFilter("oid", tui.OIDParser, "OID") @@ -69,3 +88,31 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } + +var ( + shardInfoBucket = []byte{5} + versionRecord = []byte("version") +) + +func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) { + err := db.View(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(shardInfoBucket) + if bkt == nil { + return nil + } + rec := bkt.Get(versionRecord) + if rec == nil { + return nil + } + + version = binary.LittleEndian.Uint64(rec) + ok = true + + return nil + }) + if err != nil { + common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err)) + } + + return +} diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go index 9bad19032..077a68785 100644 --- a/cmd/frostfs-lens/internal/schema/common/schema.go +++ b/cmd/frostfs-lens/internal/schema/common/schema.go @@ -3,6 +3,8 @@ package common import ( "errors" "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) type FilterResult byte @@ -71,11 +73,7 @@ func (fp FallbackParser) ToParser() Parser { func (p Parser) ToFallbackParser() FallbackParser { return func(key, value []byte) (SchemaEntry, Parser) { entry, next, err := p(key, value) - if err != nil { - panic(fmt.Errorf( - "couldn't use that parser as a fallback parser, it returned an error: %w", err, - )) - } + assert.NoError(err, "couldn't use that parser as a fallback parser") return entry, next } } diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go index 24cc0e52d..4e6bbf08a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go @@ -80,10 +80,15 @@ var ( }, ) - UserAttributeParser = NewUserAttributeKeyBucketParser( + UserAttributeParserV2 = NewUserAttributeKeyBucketParser( NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), ) + UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys( + NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), + []string{"FilePath", "S3-Access-Box-CRDT-Name"}, + ) + PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ cidResolver: StrictResolver, oidResolver: StrictResolver, @@ -108,4 +113,14 @@ var ( cidResolver: StrictResolver, oidResolver: LenientResolver, }) + + ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }) + + ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) ) diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go index 2fb122940..42a24c594 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go @@ -22,27 +22,31 @@ const ( Split ContainerCounters ECInfo + 
ExpirationEpochToObject + ObjectToExpirationEpoch ) var x = map[Prefix]string{ - Graveyard: "Graveyard", - Garbage: "Garbage", - ToMoveIt: "To Move It", - ContainerVolume: "Container Volume", - Locked: "Locked", - ShardInfo: "Shard Info", - Primary: "Primary", - Lockers: "Lockers", - Tombstone: "Tombstone", - Small: "Small", - Root: "Root", - Owner: "Owner", - UserAttribute: "User Attribute", - PayloadHash: "Payload Hash", - Parent: "Parent", - Split: "Split", - ContainerCounters: "Container Counters", - ECInfo: "EC Info", + Graveyard: "Graveyard", + Garbage: "Garbage", + ToMoveIt: "To Move It", + ContainerVolume: "Container Volume", + Locked: "Locked", + ShardInfo: "Shard Info", + Primary: "Primary", + Lockers: "Lockers", + Tombstone: "Tombstone", + Small: "Small", + Root: "Root", + Owner: "Owner", + UserAttribute: "User Attribute", + PayloadHash: "Payload Hash", + Parent: "Parent", + Split: "Split", + ContainerCounters: "Container Counters", + ECInfo: "EC Info", + ExpirationEpochToObject: "Exp. Epoch to Object", + ObjectToExpirationEpoch: "Object to Exp. Epoch", } func (p Prefix) String() string { diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go index db90bddbd..62d126f88 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go @@ -9,7 +9,7 @@ import ( func (b *PrefixBucket) String() string { return common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ) } @@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string { return fmt.Sprintf( "%s CID %s", common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ), common.FormatSimple(b.id.String(), tcell.ColorAqua), ) @@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string { func (b *UserAttributeKeyBucket) String() string { return fmt.Sprintf("%s CID %s ATTR-KEY %s", common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ), common.FormatSimple( fmt.Sprintf("%-44s", b.id), tcell.ColorAqua, diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go index 82b47dd85..7355c3d9e 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go @@ -2,6 +2,7 @@ package buckets import ( "errors" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -57,10 +58,11 @@ var ( ) var ( - ErrNotBucket = errors.New("not a bucket") - ErrInvalidKeyLength = errors.New("invalid key length") - ErrInvalidValueLength = errors.New("invalid value length") - ErrInvalidPrefix = errors.New("invalid prefix") + ErrNotBucket = errors.New("not a bucket") + ErrInvalidKeyLength = errors.New("invalid key length") + ErrInvalidValueLength = errors.New("invalid value length") + ErrInvalidPrefix = errors.New("invalid prefix") + ErrUnexpectedAttributeKey = errors.New("unexpected attribute key") ) func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { @@ -132,6 +134,10 @@ func NewContainerBucketParser(next common.Parser, 
resolvers Resolvers) common.Pa } func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { + return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil) +} + +func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser { return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { if value != nil { return nil, nil, ErrNotBucket @@ -147,6 +153,11 @@ func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { return nil, nil, err } b.key = string(key[33:]) + + if len(keys) != 0 && !slices.Contains(keys, b.key) { + return nil, nil, ErrUnexpectedAttributeKey + } + return &b, next, nil } } diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go index ea095e207..4cc9e8765 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/parser.go +++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go @@ -5,7 +5,30 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" ) -var MetabaseParser = common.WithFallback( +var MetabaseParserV3 = common.WithFallback( + common.Any( + buckets.GraveyardParser, + buckets.GarbageParser, + buckets.ContainerVolumeParser, + buckets.LockedParser, + buckets.ShardInfoParser, + buckets.PrimaryParser, + buckets.LockersParser, + buckets.TombstoneParser, + buckets.SmallParser, + buckets.RootParser, + buckets.UserAttributeParserV3, + buckets.ParentParser, + buckets.SplitParser, + buckets.ContainerCountersParser, + buckets.ECInfoParser, + buckets.ExpirationEpochToObjectParser, + buckets.ObjectToExpirationEpochParser, + ), + common.RawParser.ToFallbackParser(), +) + +var MetabaseParserV2 = common.WithFallback( common.Any( buckets.GraveyardParser, buckets.GarbageParser, @@ -18,7 +41,7 @@ var MetabaseParser = common.WithFallback( buckets.SmallParser, buckets.RootParser, buckets.OwnerParser, - buckets.UserAttributeParser, + buckets.UserAttributeParserV2, buckets.PayloadHashParser, buckets.ParentParser, buckets.SplitParser, diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go index 2dda15b4f..477c4fc9d 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go @@ -63,3 +63,11 @@ func (r *ContainerCountersRecord) DetailedString() string { func (r *ECInfoRecord) DetailedString() string { return spew.Sdump(*r) } + +func (r *ExpirationEpochToObjectRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ObjectToExpirationEpochRecord) DetailedString() string { + return spew.Sdump(*r) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go index 880a7a8ff..e038911d7 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go @@ -143,3 +143,26 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult { return common.No } } + +func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ObjectToExpirationEpochRecord) Filter(typ string, val 
any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go index 1b070e2a0..5d846cb75 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go @@ -249,3 +249,45 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e } return &r, nil, nil } + +func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 72 { + return nil, nil, ErrInvalidKeyLength + } + + var ( + r ExpirationEpochToObjectRecord + err error + ) + + r.epoch = binary.BigEndian.Uint64(key[:8]) + if err = r.cnt.Decode(key[8:40]); err != nil { + return nil, nil, err + } + if err = r.obj.Decode(key[40:]); err != nil { + return nil, nil, err + } + + return &r, nil, nil +} + +func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + if len(value) != 8 { + return nil, nil, ErrInvalidValueLength + } + + var ( + r ObjectToExpirationEpochRecord + err error + ) + + if err = r.obj.Decode(key); err != nil { + return nil, nil, err + } + r.epoch = binary.LittleEndian.Uint64(value) + + return &r, nil, nil +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go index ec0ab8e1a..f71244625 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go @@ -2,6 +2,7 @@ package records import ( "fmt" + "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "github.com/gdamore/tcell/v2" @@ -133,3 +134,22 @@ func (r *ECInfoRecord) String() string { len(r.ids), ) } + +func (r *ExpirationEpochToObjectRecord) String() string { + return fmt.Sprintf( + "exp. epoch %s %c CID %s OID %s", + common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + ) +} + +func (r *ObjectToExpirationEpochRecord) String() string { + return fmt.Sprintf( + "OID %s %c exp. 
epoch %s", + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua), + ) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go index 34c1c29fd..0809cad1a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go @@ -79,4 +79,15 @@ type ( id oid.ID ids []oid.ID } + + ExpirationEpochToObjectRecord struct { + epoch uint64 + cnt cid.ID + obj oid.ID + } + + ObjectToExpirationEpochRecord struct { + obj oid.ID + epoch uint64 + } ) diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go index 7d70b27b2..3bfe2608b 100644 --- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go +++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go @@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, r.addr.SetContainer(cnr) r.addr.SetObject(obj) - r.data = value[:] + r.data = value return &r, nil, nil } diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go index 4fdf97119..471514e5d 100644 --- a/cmd/frostfs-lens/internal/tui/input.go +++ b/cmd/frostfs-lens/internal/tui/input.go @@ -1,6 +1,8 @@ package tui import ( + "slices" + "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -26,7 +28,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) { // Used history data for search prompt, so just make that data recent. if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] { - f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...) + f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1) f.history = append(f.history, s) } @@ -51,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo f.historyPointer++ // Stop iterating over history. if f.historyPointer == len(f.history) { - f.InputField.SetText(f.currentContent) + f.SetText(f.currentContent) return } - f.InputField.SetText(f.history[f.historyPointer]) + f.SetText(f.history[f.historyPointer]) case tcell.KeyUp: if len(f.history) == 0 { return } // Start iterating over history. if f.historyPointer == len(f.history) { - f.currentContent = f.InputField.GetText() + f.currentContent = f.GetText() } // End of history. if f.historyPointer == 0 { @@ -69,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo } // Iterate to least recent prompts. 
f.historyPointer-- - f.InputField.SetText(f.history[f.historyPointer]) + f.SetText(f.history[f.historyPointer]) default: f.InputField.InputHandler()(event, func(tview.Primitive) {}) } diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go index 5f61df884..a4d392ab3 100644 --- a/cmd/frostfs-lens/internal/tui/records.go +++ b/cmd/frostfs-lens/internal/tui/records.go @@ -8,6 +8,7 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -94,9 +95,7 @@ func (v *RecordsView) Mount(ctx context.Context) error { } func (v *RecordsView) Unmount() { - if v.onUnmount == nil { - panic("try to unmount not mounted component") - } + assert.False(v.onUnmount == nil, "try to unmount not mounted component") v.onUnmount() v.onUnmount = nil } diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go index bcc082821..cc6b7859e 100644 --- a/cmd/frostfs-lens/internal/tui/ui.go +++ b/cmd/frostfs-lens/internal/tui/ui.go @@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { return } - switch ui.mountedPage.(type) { + switch v := ui.mountedPage.(type) { case *BucketsView: ui.moveNextPage(NewBucketsView(ui, res)) case *RecordsView: - bucket := ui.mountedPage.(*RecordsView).bucket + bucket := v.bucket ui.moveNextPage(NewRecordsView(ui, bucket, res)) } @@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) } - ui.Box.MouseHandler() + ui.MouseHandler() } func (ui *UI) WithPrompt(prompt string) error { diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index e761a1b14..513314712 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -14,7 +14,7 @@ import ( func initAPEManagerService(c *cfg) { contractStorage := ape_contract.NewProxyVerificationContractStorage( morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.shared.key, + c.key, c.cfgMorph.proxyScriptHash, c.cfgObject.cfgAccessPolicyEngine.policyContractHash) diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go index 64c3beba7..ce8ae9662 100644 --- a/cmd/frostfs-node/attributes.go +++ b/cmd/frostfs-node/attributes.go @@ -6,9 +6,5 @@ import ( ) func parseAttributes(c *cfg) { - if nodeconfig.Relay(c.appCfg) { - return - } - fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg))) } diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 0fe56d2b0..e5df0a22d 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -1,20 +1,27 @@ package main import ( + "bytes" + "cmp" "context" + "slices" "sync" + "sync/atomic" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/hashicorp/golang-lru/v2/simplelru" + "go.uber.org/zap" ) type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) @@ -110,55 +117,6 @@ func (c *ttlNetCache[K, V]) remove(key K) { hit = c.cache.Remove(key) } -// entity that provides LRU cache interface. -type lruNetCache struct { - cache *lru.Cache[uint64, *netmapSDK.NetMap] - - netRdr netValueReader[uint64, *netmapSDK.NetMap] - - metrics cacheMetrics -} - -// newNetworkLRUCache returns wrapper over netValueReader with LRU cache. -func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache { - cache, err := lru.New[uint64, *netmapSDK.NetMap](sz) - fatalOnErr(err) - - return &lruNetCache{ - cache: cache, - netRdr: netRdr, - metrics: metrics, - } -} - -// reads value by the key. -// -// updates the value from the network on cache miss. -// -// returned value should not be modified. -func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - val, ok := c.cache.Get(key) - if ok { - hit = true - return val, nil - } - - val, err := c.netRdr(ctx, key) - if err != nil { - return nil, err - } - - c.cache.Add(key, val) - - return val, nil -} - // wrapper over TTL cache of values read from the network // that implements container storage. type ttlContainerStorage struct { @@ -200,20 +158,222 @@ func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*con type lruNetmapSource struct { netState netmap.State - cache *lruNetCache + client rawSource + cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]] + mtx sync.RWMutex + metrics cacheMetrics + log *logger.Logger + candidates atomic.Pointer[[]netmapSDK.NodeInfo] } -func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { +type rawSource interface { + GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) +} + +func newCachedNetmapStorage(ctx context.Context, log *logger.Logger, + netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration, +) netmap.Source { const netmapCacheSize = 10 - lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { - return v.GetNetMapByEpoch(ctx, key) - }, metrics.NewCacheMetrics("netmap")) + cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil) + fatalOnErr(err) - return &lruNetmapSource{ - netState: s, - cache: lruNetmapCache, + src := &lruNetmapSource{ + netState: netState, + client: client, + cache: cache, + log: log, + metrics: metrics.NewCacheMetrics("netmap"), } + + wg.Add(1) + go func() { + defer wg.Done() + src.updateCandidates(ctx, d) + }() + + return src +} + +// updateCandidates routine to merge netmap in cache with candidates list. 
+func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) { + timer := time.NewTimer(d) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + newCandidates, err := s.client.GetCandidates(ctx) + if err != nil { + s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err)) + timer.Reset(d) + break + } + if len(newCandidates) == 0 { + s.candidates.Store(&newCandidates) + timer.Reset(d) + break + } + slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + return cmp.Compare(n1.Hash(), n2.Hash()) + }) + + // Check once state changed + v := s.candidates.Load() + if v == nil { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + timer.Reset(d) + break + } + ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) || + uint32(n1.Status()) != uint32(n2.Status()) || + slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 { + return 1 + } + ne1 := slices.Collect(n1.NetworkEndpoints()) + ne2 := slices.Collect(n2.NetworkEndpoints()) + return slices.Compare(ne1, ne2) + }) + if ret != 0 { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + } + timer.Reset(d) + } + } +} + +func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) { + s.mtx.Lock() + tmp := s.cache.Values() + s.mtx.Unlock() + for _, pointer := range tmp { + nm := pointer.Load() + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + nm = nm.Clone() + mergeNetmapWithCandidates(updates, nm) + pointer.Store(nm) + } + } +} + +// reads value by the key. +// +// updates the value from the network on cache miss. +// +// returned value should not be modified. +func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + hit := false + startedAt := time.Now() + defer func() { + s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) + }() + + s.mtx.RLock() + val, ok := s.cache.Get(key) + s.mtx.RUnlock() + if ok { + hit = true + return val.Load(), nil + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + val, ok = s.cache.Get(key) + if ok { + hit = true + return val.Load(), nil + } + + nm, err := s.client.GetNetMapByEpoch(ctx, key) + if err != nil { + return nil, err + } + v := s.candidates.Load() + if v != nil { + updates := getNetMapNodesToUpdate(nm, *v) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + } + + p := atomic.Pointer[netmapSDK.NetMap]{} + p.Store(nm) + s.cache.Add(key, &p) + + return nm, nil +} + +// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates. +func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) { + for _, v := range updates { + if v.status != netmapSDK.UnspecifiedState { + nm.Nodes()[v.netmapIndex].SetStatus(v.status) + } + if v.externalAddresses != nil { + nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...) + } + if v.endpoints != nil { + nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...) + } + } +} + +type nodeToUpdate struct { + netmapIndex int + status netmapSDK.NodeState + externalAddresses []string + endpoints []string +} + +// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates. 
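// Editor's note, not part of the patch: in the function below, only
// candidate transitions to Online or Maintenance propagate a status change
// into the cached netmap; external addresses and network endpoints are
// compared element-wise with slices.Compare, so ordering matters.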
+func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate { + var res []nodeToUpdate + for i := range nm.Nodes() { + for _, cnd := range candidates { + if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) { + var tmp nodeToUpdate + var update bool + + if cnd.Status() != nm.Nodes()[i].Status() && + (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) { + update = true + tmp.status = cnd.Status() + } + + externalAddresses := cnd.ExternalAddresses() + if externalAddresses != nil && + slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 { + update = true + tmp.externalAddresses = externalAddresses + } + + nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) + nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints()) + candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) + candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints()) + if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { + update = true + tmp.endpoints = candidateEndpoints + } + + if update { + tmp.netmapIndex = i + res = append(res, tmp) + } + + break + } + } + } + return res } func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { @@ -225,7 +385,7 @@ func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (* } func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.cache.get(ctx, epoch) + val, err := s.get(ctx, epoch) if err != nil { return nil, err } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go index b1601aa67..24286826f 100644 --- a/cmd/frostfs-node/cache_test.go +++ b/cmd/frostfs-node/cache_test.go @@ -3,9 +3,11 @@ package main import ( "context" "errors" + "sync" "testing" "time" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/stretchr/testify/require" ) @@ -59,3 +61,75 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) { type noopCacheMetricts struct{} func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} + +type rawSrc struct{} + +func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) { + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Online) + node0.SetExternalAddresses("1", "0") + node0.SetNetworkEndpoints("1", "0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Online) + node1.SetExternalAddresses("1", "0") + node1.SetNetworkEndpoints("1", "0") + + return []netmapSDK.NodeInfo{node0, node1}, nil +} + +func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm := netmapSDK.NetMap{} + nm.SetEpoch(1) + + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Maintenance) + node0.SetExternalAddresses("0") + node0.SetNetworkEndpoints("0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Maintenance) + node1.SetExternalAddresses("0") + node1.SetNetworkEndpoints("0") + + nm.SetNodes([]netmapSDK.NodeInfo{node0, node1}) + + return &nm, nil +} + +type st struct{} + +func (s *st) CurrentEpoch() uint64 { + return 1 +} + +func TestNetmapStorage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + wg := 
sync.WaitGroup{} + cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50) + + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance) + require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1) + require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1) + + require.Eventually(t, func() bool { + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + for _, node := range nm.Nodes() { + if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 && + node.NumberOfNetworkEndpoints() == 2) { + return false + } + } + return true + }, time.Second*5, time.Millisecond*10) + + cancel() + wg.Wait() +} diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 75d6f6dec..96274e625 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -30,15 +30,18 @@ import ( objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -69,6 +72,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -106,6 +110,8 @@ type applicationConfiguration struct { level string destination string timestamp bool + options []zap.Option + tags [][]string } ObjectCfg struct { @@ -115,7 +121,6 @@ type applicationConfiguration struct { EngineCfg struct { errorThreshold uint32 - shardPoolSize uint32 shards []shardCfg lowMem bool } @@ -125,15 +130,13 @@ type applicationConfiguration struct { } type shardCfg struct { - compress bool - estimateCompressibility bool - estimateCompressibilityThreshold float64 + compression compression.Config smallSizeObjectLimit uint64 - uncompressableContentType []string refillMetabase bool refillMetabaseWorkersCount int mode shardmode.Mode + limiter qos.Limiter metaCfg struct { path string @@ -230,62 +233,71 @@ 
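// Editor's note, not part of the patch: readConfig below carries logger
// tag filters as [][]string pairs of {names, level}. An illustrative
// config such as
//
//   logger:
//     tags:
//       - names: "main, morph"
//         level: debug
//
// arrives here as [][]string{{"main, morph", "debug"}}, matching the
// loggerconfig.Tags parser added later in this patch.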
func (a *applicationConfiguration) readConfig(c *config.Config) error { a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) + var opts []zap.Option + if loggerconfig.ToLokiConfig(c).Enabled { + opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { + lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c)) + return lokiCore + })} + } + a.LoggerCfg.options = opts + a.LoggerCfg.tags = loggerconfig.Tags(c) // Object a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) - var pm []placement.Metric - for _, raw := range objectconfig.Get(c).Priority() { - m, err := placement.ParseMetric(raw) - if err != nil { - return err - } - pm = append(pm, m) + locodeDBPath := nodeconfig.LocodeDBPath(c) + parser, err := placement.NewMetricsParser(locodeDBPath) + if err != nil { + return fmt.Errorf("metrics parser creation: %w", err) } - a.ObjectCfg.priorityMetrics = pm + m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) + if err != nil { + return fmt.Errorf("parse metrics: %w", err) + } + a.ObjectCfg.priorityMetrics = m // Storage Engine a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) - a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c) a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) } -func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error { - var newConfig shardCfg +func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { + var target shardCfg - newConfig.refillMetabase = oldConfig.RefillMetabase() - newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount() - newConfig.mode = oldConfig.Mode() - newConfig.compress = oldConfig.Compress() - newConfig.estimateCompressibility = oldConfig.EstimateCompressibility() - newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold() - newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes() - newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit() + target.refillMetabase = source.RefillMetabase() + target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() + target.mode = source.Mode() + target.compression = source.Compression() + target.smallSizeObjectLimit = source.SmallSizeLimit() - a.setShardWriteCacheConfig(&newConfig, oldConfig) + a.setShardWriteCacheConfig(&target, source) - a.setShardPiloramaConfig(c, &newConfig, oldConfig) + a.setShardPiloramaConfig(c, &target, source) - if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil { + if err := a.setShardStorageConfig(&target, source); err != nil { return err } - a.setMetabaseConfig(&newConfig, oldConfig) + a.setMetabaseConfig(&target, source) - a.setGCConfig(&newConfig, oldConfig) + a.setGCConfig(&target, source) + if err := a.setLimiter(&target, source); err != nil { + return err + } - a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig) + a.EngineCfg.shards = append(a.EngineCfg.shards, target) return nil } -func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - writeCacheCfg := oldConfig.WriteCache() +func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { + writeCacheCfg := source.WriteCache() if 
writeCacheCfg.Enabled() { - wc := &newConfig.writecacheCfg + wc := &target.writecacheCfg wc.enabled = true wc.path = writeCacheCfg.Path() @@ -298,10 +310,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, } } -func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) { +func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { if config.BoolSafe(c.Sub("tree"), "enabled") { - piloramaCfg := oldConfig.Pilorama() - pr := &newConfig.piloramaCfg + piloramaCfg := source.Pilorama() + pr := &target.piloramaCfg pr.enabled = true pr.path = piloramaCfg.Path() @@ -312,8 +324,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC } } -func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error { - blobStorCfg := oldConfig.BlobStor() +func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { + blobStorCfg := source.BlobStor() storagesCfg := blobStorCfg.Storages() ss := make([]subStorageCfg, 0, len(storagesCfg)) @@ -347,13 +359,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol ss = append(ss, sCfg) } - newConfig.subStorages = ss + target.subStorages = ss return nil } -func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - metabaseCfg := oldConfig.Metabase() - m := &newConfig.metaCfg +func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { + metabaseCfg := source.Metabase() + m := &target.metaCfg m.path = metabaseCfg.Path() m.perm = metabaseCfg.BoltDB().Perm() @@ -361,12 +373,22 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() } -func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - gcCfg := oldConfig.GC() - newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() - newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() - newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() - newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { + gcCfg := source.GC() + target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() + target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() + target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() + target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +} + +func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { + limitsConfig := source.Limits().ToConfig() + limiter, err := qos.NewLimiter(limitsConfig) + if err != nil { + return err + } + target.limiter = limiter + return nil } // internals contains application-specific internals that are created @@ -456,7 +478,6 @@ type shared struct { // dynamicConfiguration stores parameters of the // components that supports runtime reconfigurations. 
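// Editor's note, not part of the patch: the logger leaves this struct
// because reload no longer mutates a stored logger.Prm; getComponents now
// rebuilds the parameters from config and applies them via
// logger.UpdateLevelForTags, so pprof and metrics remain the only
// dynamically reconfigurable components here.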
type dynamicConfiguration struct { - logger *logger.Prm pprof *httpComponent metrics *httpComponent } @@ -493,6 +514,7 @@ type cfg struct { cfgNetmap cfgNetmap cfgControlService cfgControlService cfgObject cfgObject + cfgQoSService cfgQoSService } // ReadCurrentNetMap reads network map which has been cached at the @@ -527,6 +549,8 @@ type cfgGRPC struct { maxChunkSize uint64 maxAddrAmount uint64 reconnectTimeout time.Duration + + limiter atomic.Pointer[limiting.SemaphoreLimiter] } func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { @@ -627,7 +651,6 @@ type cfgNetmap struct { state *networkState - needBootstrap bool reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime } @@ -663,10 +686,6 @@ type cfgAccessPolicyEngine struct { } type cfgObjectRoutines struct { - putRemote *ants.Pool - - putLocal *ants.Pool - replication *ants.Pool } @@ -690,24 +709,18 @@ func initCfg(appCfg *config.Config) *cfg { key := nodeconfig.Key(appCfg) - relayOnly := nodeconfig.Relay(appCfg) - netState := newNetworkState() - c.shared = initShared(appCfg, key, netState, relayOnly) + c.shared = initShared(appCfg, key, netState) netState.metrics = c.metricsCollector - logPrm := c.loggerPrm() + logPrm, err := c.loggerPrm() + fatalOnErr(err) logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) - if loggerconfig.ToLokiConfig(appCfg).Enabled { - log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { - lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg)) - return lokiCore - })) - } + logger.UpdateLevelForTags(logPrm) c.internals = initInternals(appCfg, log) @@ -718,7 +731,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgFrostfsID = initFrostfsID(appCfg) - c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) + c.cfgNetmap = initNetmap(appCfg, netState) c.cfgGRPC = initCfgGRPC() @@ -764,12 +777,8 @@ func initSdNotify(appCfg *config.Config) bool { return false } -func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared { - var netAddr network.AddressGroup - - if !relayOnly { - netAddr = nodeconfig.BootstrapAddresses(appCfg) - } +func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared { + netAddr := nodeconfig.BootstrapAddresses(appCfg) persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) @@ -820,18 +829,15 @@ func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) interna return result } -func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap { +func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap { netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) fatalOnErr(err) - var reBootstrapTurnedOff atomic.Bool - reBootstrapTurnedOff.Store(relayOnly) return cfgNetmap{ scriptHash: contractsconfig.Netmap(appCfg), state: netState, workerPool: netmapWorkerPool, - needBootstrap: !relayOnly, - reBoostrapTurnedOff: &reBootstrapTurnedOff, + reBoostrapTurnedOff: &atomic.Bool{}, } } @@ -851,14 +857,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID { } } -func initCfgGRPC() cfgGRPC { +func initCfgGRPC() (cfg cfgGRPC) { maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes - return cfgGRPC{ - maxChunkSize: maxChunkSize, - maxAddrAmount: maxAddrAmount, - } + cfg.maxChunkSize = 
maxChunkSize + cfg.maxAddrAmount = maxAddrAmount + + return } func initCfgObject(appCfg *config.Config) cfgObject { @@ -875,9 +881,8 @@ func (c *cfg) engineOpts() []engine.Option { var opts []engine.Option opts = append(opts, - engine.WithShardPoolSize(c.EngineCfg.shardPoolSize), engine.WithErrorThreshold(c.EngineCfg.errorThreshold), - engine.WithLogger(c.log), + engine.WithLogger(c.log.WithTag(logger.TagEngine)), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), ) @@ -914,7 +919,8 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { writecache.WithMaxCacheSize(wcRead.sizeLimit), writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithNoSync(wcRead.noSync), - writecache.WithLogger(c.log), + writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), + writecache.WithQoSLimiter(shCfg.limiter), ) } return writeCacheOpts @@ -953,7 +959,8 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), - blobovniczatree.WithLogger(c.log), + blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), + blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)), blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), } @@ -976,7 +983,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. fstree.WithPerm(sRead.perm), fstree.WithDepth(sRead.depth), fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log), + fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), } if c.metricsCollector != nil { fstreeOpts = append(fstreeOpts, @@ -1006,12 +1013,9 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID ss := c.getSubstorageOpts(ctx, shCfg) blobstoreOpts := []blobstor.Option{ - blobstor.WithCompressObjects(shCfg.compress), - blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType), - blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility), - blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold), + blobstor.WithCompression(shCfg.compression), blobstor.WithStorages(ss), - blobstor.WithLogger(c.log), + blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)), } if c.metricsCollector != nil { blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore()))) @@ -1030,12 +1034,13 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID } if c.metricsCollector != nil { mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics()))) + shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics()) } var sh shardOptsWithID sh.configID = shCfg.id() sh.shOpts = []shard.Option{ - shard.WithLogger(c.log), + shard.WithLogger(c.log.WithTag(logger.TagShard)), shard.WithRefillMetabase(shCfg.refillMetabase), shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount), shard.WithMode(shCfg.mode), @@ -1054,30 +1059,33 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return pool }), + shard.WithLimiter(shCfg.limiter), } return sh } -func (c *cfg) loggerPrm() *logger.Prm { - // check if it has been inited before - if c.dynamicConfiguration.logger == nil { - c.dynamicConfiguration.logger = new(logger.Prm) - } - +func (c *cfg) 
loggerPrm() (logger.Prm, error) { + var prm logger.Prm // (re)init read configuration - err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level) + err := prm.SetLevelString(c.LoggerCfg.level) if err != nil { // not expected since validation should be performed before - panic("incorrect log level format: " + c.LoggerCfg.level) + return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level) } - err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination) + err = prm.SetDestination(c.LoggerCfg.destination) if err != nil { // not expected since validation should be performed before - panic("incorrect log destination format: " + c.LoggerCfg.destination) + return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination) + } + prm.PrependTimestamp = c.LoggerCfg.timestamp + prm.Options = c.LoggerCfg.options + err = prm.SetTags(c.LoggerCfg.tags) + if err != nil { + // not expected since validation should be performed before + return logger.Prm{}, errors.New("incorrect allowed tags format") } - c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp - return c.dynamicConfiguration.logger + return prm, nil } func (c *cfg) LocalAddress() network.AddressGroup { @@ -1165,21 +1173,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) { func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { var err error - optNonBlocking := ants.WithNonblocking(true) - - putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote() - pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking) - fatalOnErr(err) - - putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal() - pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking) - fatalOnErr(err) - replicatorPoolSize := replicatorconfig.PoolSize(cfg) - if replicatorPoolSize <= 0 { - replicatorPoolSize = putRemoteCapacity - } - pool.replication, err = ants.NewPool(replicatorPoolSize) fatalOnErr(err) @@ -1252,11 +1246,6 @@ func (c *cfg) bootstrap(ctx context.Context) error { return bootstrapOnline(ctx, c) } -// needBootstrap checks if local node should be registered in network on bootup. 
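// Editor's note, not part of the patch: needBootstrap is removed together
// with the node.relay option; every node now registers itself in the
// netmap, which is why parseAttributes, initShared and initNetmap lost
// their relay handling earlier in this patch.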
-func (c *cfg) needBootstrap() bool { - return c.cfgNetmap.needBootstrap -} - type dCmp struct { name string reloadFunc func() error @@ -1331,11 +1320,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { // all the components are expected to support // Logger's dynamic reconfiguration approach - // Logger - - logPrm := c.loggerPrm() - - components := c.getComponents(ctx, logPrm) + components := c.getComponents(ctx) // Object c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) @@ -1373,10 +1358,17 @@ func (c *cfg) reloadConfig(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } -func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { +func (c *cfg) getComponents(ctx context.Context) []dCmp { var components []dCmp - components = append(components, dCmp{"logger", logPrm.Reload}) + components = append(components, dCmp{"logger", func() error { + prm, err := c.loggerPrm() + if err != nil { + return err + } + logger.UpdateLevelForTags(prm) + return nil + }}) components = append(components, dCmp{"runtime", func() error { setRuntimeParameters(ctx, c) return nil @@ -1397,6 +1389,12 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { } return err }}) + if c.treeService != nil { + components = append(components, dCmp{"tree", func() error { + c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys()) + return nil + }}) + } if cmp, updated := metricsComponent(c); updated { if cmp.enabled { cmp.preReload = enableMetricsSvc @@ -1409,17 +1407,13 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) } + components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }}) + return components } func (c *cfg) reloadPools() error { - newSize := objectconfig.Put(c.appCfg).PoolSizeLocal() - c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size") - - newSize = objectconfig.Put(c.appCfg).PoolSizeRemote() - c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size") - - newSize = replicatorconfig.PoolSize(c.appCfg) + newSize := replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") return nil diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go index 35dae97d9..ee9d4268b 100644 --- a/cmd/frostfs-node/config/configdir_test.go +++ b/cmd/frostfs-node/config/configdir_test.go @@ -12,13 +12,10 @@ import ( func TestConfigDir(t *testing.T) { dir := t.TempDir() - cfgFileName0 := path.Join(dir, "cfg_00.json") - cfgFileName1 := path.Join(dir, "cfg_01.yml") + cfgFileName := path.Join(dir, "cfg_01.yml") - require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777)) - require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777)) + require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) c := New("", dir, "") require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) - require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size"))) } diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go index e5735e88b..7994e7809 100644 --- a/cmd/frostfs-node/config/engine/config.go +++ b/cmd/frostfs-node/config/engine/config.go @@ -11,10 +11,6 @@ import ( 
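// Editor's note, not part of the patch: the hunk below retires
// storage.shard_pool_size; together with the object.put pool-size options
// removed later in this patch, the replicator pool (replicator.pool_size)
// is the only worker pool that stays configurable.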
const ( subsection = "storage" - - // ShardPoolSizeDefault is a default value of routine pool size per-shard to - // process object PUT operations in a storage engine. - ShardPoolSizeDefault = 20 ) // ErrNoShardConfigured is returned when at least 1 shard is required but none are found. @@ -65,18 +61,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) return nil } -// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section. -// -// Returns ShardPoolSizeDefault if the value is not a positive number. -func ShardPoolSize(c *config.Config) uint32 { - v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size") - if v > 0 { - return v - } - - return ShardPoolSizeDefault -} - // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // // Returns 0 if the the value is missing. diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index ef6380a62..401c54edc 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -14,6 +14,8 @@ import ( piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) @@ -53,7 +55,6 @@ func TestEngineSection(t *testing.T) { require.False(t, handlerCalled) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) - require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty)) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) }) @@ -63,7 +64,6 @@ func TestEngineSection(t *testing.T) { num := 0 require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) - require.EqualValues(t, 15, engineconfig.ShardPoolSize(c)) err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { defer func() { @@ -76,6 +76,7 @@ func TestEngineSection(t *testing.T) { ss := blob.Storages() pl := sc.Pilorama() gc := sc.GC() + limits := sc.Limits() switch num { case 0: @@ -100,10 +101,11 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, true, sc.Compress()) - require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) - require.Equal(t, true, sc.EstimateCompressibility()) - require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) + require.Equal(t, true, sc.Compression().Enabled) + require.Equal(t, compression.LevelFastest, sc.Compression().Level) + require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) + require.Equal(t, true, sc.Compression().EstimateCompressibility) + require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -134,6 +136,86 @@ func TestEngineSection(t *testing.T) { require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, mode.ReadOnly, sc.Mode()) 
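// Editor's note, not part of the patch: the readLimits/writeLimits
// assertions just below pin the new per-shard "limits" section. As the
// names suggest, weight presumably steers scheduling priority between IO
// tags, reserved_ops guarantees a floor, limit_ops caps throughput, and
// prohibited rejects the tag outright; the values mirror
// config/example/node.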
require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, 30*time.Second, readLimits.IdleTimeout) + require.Equal(t, int64(10_000), readLimits.MaxRunningOps) + require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) + require.Equal(t, 45*time.Second, writeLimits.IdleTimeout) + require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) + require.Equal(t, int64(100), writeLimits.MaxWaitingOps) + require.ElementsMatch(t, readLimits.Tags, + []qos.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(20), + ReservedOps: toPtr(1000), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(70), + ReservedOps: toPtr(10000), + }, + { + Tag: "background", + Weight: toPtr(5), + LimitOps: toPtr(10000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(5), + LimitOps: toPtr(25000), + }, + { + Tag: "policer", + Weight: toPtr(5), + LimitOps: toPtr(25000), + Prohibited: true, + }, + { + Tag: "treesync", + Weight: toPtr(5), + LimitOps: toPtr(25), + }, + }) + require.ElementsMatch(t, writeLimits.Tags, + []qos.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(200), + ReservedOps: toPtr(100), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(700), + ReservedOps: toPtr(1000), + }, + { + Tag: "background", + Weight: toPtr(50), + LimitOps: toPtr(1000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "policer", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "treesync", + Weight: toPtr(50), + LimitOps: toPtr(100), + }, + }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, fs.FileMode(0o644), pl.Perm()) @@ -156,8 +238,9 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, false, sc.Compress()) - require.Equal(t, []string(nil), sc.UncompressableContentTypes()) + require.Equal(t, false, sc.Compression().Enabled) + require.Equal(t, compression.LevelDefault, sc.Compression().Level) + require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -188,6 +271,17 @@ func TestEngineSection(t *testing.T) { require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps) + require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps) + require.Equal(t, 0, len(readLimits.Tags)) + require.Equal(t, 0, len(writeLimits.Tags)) } return nil }) @@ -201,3 +295,7 @@ func TestEngineSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) } + +func toPtr(v float64) *float64 { + return &v +} diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go index a51308b5b..b564d36f8 100644 --- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go +++ 
b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go @@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d < 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } // NoSync returns the value of "no_sync" config parameter. @@ -66,8 +60,5 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) PageSize() int { s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index 0620c9f63..d42646da7 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -4,9 +4,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" + limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -26,42 +28,27 @@ func From(c *config.Config) *Config { return (*Config)(c) } -// Compress returns the value of "compress" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) Compress() bool { - return config.BoolSafe( - (*config.Config)(x), - "compress", - ) -} - -// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter. -// -// Returns nil if a the value is missing or is invalid. -func (x *Config) UncompressableContentTypes() []string { - return config.StringSliceSafe( - (*config.Config)(x), - "compression_exclude_content_types") -} - -// EstimateCompressibility returns the value of "estimate_compressibility" config parameter. -// -// Returns false if the value is not a valid bool. 
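// Editor's note, not part of the patch: the flat shard keys (compress,
// compression_exclude_content_types, compression_estimate_compressibility,
// and friends) are folded into a "compression" subsection. Illustratively,
//
//   compression:
//     enabled: true
//     level: fastest
//     exclude_content_types: ["audio/*", "video/*"]
//
// is read by the new Compression() accessor into one compression.Config.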
-func (x *Config) EstimateCompressibility() bool { - return config.BoolSafe( - (*config.Config)(x), - "compression_estimate_compressibility", - ) +func (x *Config) Compression() compression.Config { + cc := (*config.Config)(x).Sub("compression") + if cc == nil { + return compression.Config{} + } + return compression.Config{ + Enabled: config.BoolSafe(cc, "enabled"), + UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), + Level: compression.Level(config.StringSafe(cc, "level")), + EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), + EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), + } } // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. // // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0]. -func (x *Config) EstimateCompressibilityThreshold() float64 { +func estimateCompressibilityThreshold(c *config.Config) float64 { v := config.FloatOrDefault( - (*config.Config)(x), - "compression_estimate_compressibility_threshold", + c, + "estimate_compressibility_threshold", EstimateCompressibilityThresholdDefault) if v < 0.0 || v > 1.0 { return EstimateCompressibilityThresholdDefault @@ -125,6 +112,14 @@ func (x *Config) GC() *gcconfig.Config { ) } +// Limits returns "limits" subsection as a limitsconfig.Config. +func (x *Config) Limits() *limitsconfig.Config { + return limitsconfig.From( + (*config.Config)(x). + Sub("limits"), + ) +} + // RefillMetabase returns the value of "resync_metabase" config parameter. // // Returns false if the value is not a valid bool. diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go new file mode 100644 index 000000000..ccd1e0000 --- /dev/null +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -0,0 +1,112 @@ +package limits + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "github.com/spf13/cast" +) + +// From wraps config section into Config. +func From(c *config.Config) *Config { + return (*Config)(c) +} + +// Config is a wrapper over the config section +// which provides access to Shard's limits configurations. 
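// Editor's note, not part of the patch: ToConfig below validates the
// assembled limiter configuration and panics on error, so a malformed
// "limits" section fails fast at node startup instead of surfacing later
// inside the shard's QoS limiter.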
+type Config config.Config + +func (x *Config) ToConfig() qos.LimiterConfig { + result := qos.LimiterConfig{ + Read: x.read(), + Write: x.write(), + } + panicOnErr(result.Validate()) + return result +} + +func (x *Config) read() qos.OpConfig { + return x.parse("read") +} + +func (x *Config) write() qos.OpConfig { + return x.parse("write") +} + +func (x *Config) parse(sub string) qos.OpConfig { + c := (*config.Config)(x).Sub(sub) + var result qos.OpConfig + + if s := config.Int(c, "max_waiting_ops"); s > 0 { + result.MaxWaitingOps = s + } else { + result.MaxWaitingOps = qos.NoLimit + } + + if s := config.Int(c, "max_running_ops"); s > 0 { + result.MaxRunningOps = s + } else { + result.MaxRunningOps = qos.NoLimit + } + + if s := config.DurationSafe(c, "idle_timeout"); s > 0 { + result.IdleTimeout = s + } else { + result.IdleTimeout = qos.DefaultIdleTimeout + } + + result.Tags = tags(c) + + return result +} + +func tags(c *config.Config) []qos.IOTagConfig { + c = c.Sub("tags") + var result []qos.IOTagConfig + for i := 0; ; i++ { + tag := config.String(c, strconv.Itoa(i)+".tag") + if tag == "" { + return result + } + + var tagConfig qos.IOTagConfig + tagConfig.Tag = tag + + v := c.Value(strconv.Itoa(i) + ".weight") + if v != nil { + w, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.Weight = &w + } + + v = c.Value(strconv.Itoa(i) + ".limit_ops") + if v != nil { + l, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.LimitOps = &l + } + + v = c.Value(strconv.Itoa(i) + ".reserved_ops") + if v != nil { + r, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.ReservedOps = &r + } + + v = c.Value(strconv.Itoa(i) + ".prohibited") + if v != nil { + r, err := cast.ToBoolE(v) + panicOnErr(err) + tagConfig.Prohibited = r + } + + result = append(result, tagConfig) + } +} + +func panicOnErr(err error) { + if err != nil { + panic(err) + } +} diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go index 28671ca55..5d4e8f408 100644 --- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go +++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go @@ -52,10 +52,7 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d <= 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s <= 0 { - s = 0 - } - return s + return max(s, 0) } diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go index ba9eeea2b..20f373184 100644 --- a/cmd/frostfs-node/config/logger/config.go +++ b/cmd/frostfs-node/config/logger/config.go @@ -2,6 +2,7 @@ package loggerconfig import ( "os" + "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -60,6 +61,21 @@ func Timestamp(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "timestamp") } +// Tags returns the value of "tags" config parameter from "logger" section. 
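// Editor's note, not part of the patch: entries are read from indexed
// subsections (logger.tags.0.names, logger.tags.0.level, and so on) until
// the first index whose "names" value is empty; each entry becomes a
// {names, level} string pair.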
+func Tags(c *config.Config) [][]string { + var res [][]string + sub := c.Sub(subsection).Sub("tags") + for i := 0; ; i++ { + s := sub.Sub(strconv.FormatInt(int64(i), 10)) + names := config.StringSafe(s, "names") + if names == "" { + break + } + res = append(res, []string{names, config.StringSafe(s, "level")}) + } + return res +} + // ToLokiConfig extracts loki config. func ToLokiConfig(c *config.Config) loki.Config { hostname, _ := os.Hostname() diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go index ffe8ac693..796ad529e 100644 --- a/cmd/frostfs-node/config/logger/config_test.go +++ b/cmd/frostfs-node/config/logger/config_test.go @@ -22,6 +22,9 @@ func TestLoggerSection_Level(t *testing.T) { require.Equal(t, "debug", loggerconfig.Level(c)) require.Equal(t, "journald", loggerconfig.Destination(c)) require.Equal(t, true, loggerconfig.Timestamp(c)) + tags := loggerconfig.Tags(c) + require.Equal(t, "main, morph", tags[0][0]) + require.Equal(t, "debug", tags[0][1]) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go index d089870ea..a9f774d18 100644 --- a/cmd/frostfs-node/config/morph/config.go +++ b/cmd/frostfs-node/config/morph/config.go @@ -33,6 +33,9 @@ const ( // ContainerCacheSizeDefault represents the default size for the container cache. ContainerCacheSizeDefault = 100 + + // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates. + PollCandidatesTimeoutDefault = 20 * time.Second ) var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section") @@ -154,3 +157,17 @@ func FrostfsIDCacheSize(c *config.Config) uint32 { } return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size") } + +// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter +// from "morph" section. +// +// Returns PollCandidatesTimeoutDefault if the value is not positive duration. +func NetmapCandidatesPollInterval(c *config.Config) time.Duration { + v := config.DurationSafe(c.Sub(subsection). + Sub("netmap").Sub("candidates"), "poll_interval") + if v > 0 { + return v + } + + return PollCandidatesTimeoutDefault +} diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 969d77396..c50718c5f 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -3,7 +3,9 @@ package nodeconfig import ( "fmt" "io/fs" + "iter" "os" + "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -88,12 +90,8 @@ func Wallet(c *config.Config) *keys.PrivateKey { type stringAddressGroup []string -func (x stringAddressGroup) IterateAddresses(f func(string) bool) { - for i := range x { - if f(x[i]) { - break - } - } +func (x stringAddressGroup) Addresses() iter.Seq[string] { + return slices.Values(x) } func (x stringAddressGroup) NumberOfAddresses() int { @@ -133,14 +131,6 @@ func Attributes(c *config.Config) (attrs []string) { return } -// Relay returns the value of "relay" config parameter -// from "node" section. -// -// Returns false if the value is not set. -func Relay(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "relay") -} - // PersistentSessions returns structure that provides access to "persistent_sessions" // subsection of "node" section. 
func PersistentSessions(c *config.Config) PersistentSessionsConfig { @@ -217,3 +207,8 @@ func (l PersistentPolicyRulesConfig) NoSync() bool { func CompatibilityMode(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode") } + +// LocodeDBPath returns path to LOCODE database. +func LocodeDBPath(c *config.Config) string { + return config.String(c.Sub(subsection), "locode_db_path") +} diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go index 7b9adecf4..9af1dc038 100644 --- a/cmd/frostfs-node/config/node/config_test.go +++ b/cmd/frostfs-node/config/node/config_test.go @@ -29,12 +29,10 @@ func TestNodeSection(t *testing.T) { ) attribute := Attributes(empty) - relay := Relay(empty) persisessionsPath := PersistentSessions(empty).Path() persistatePath := PersistentState(empty).Path() require.Empty(t, attribute) - require.Equal(t, false, relay) require.Equal(t, "", persisessionsPath) require.Equal(t, PersistentStatePathDefault, persistatePath) }) @@ -45,7 +43,6 @@ func TestNodeSection(t *testing.T) { key := Key(c) addrs := BootstrapAddresses(c) attributes := Attributes(c) - relay := Relay(c) wKey := Wallet(c) persisessionsPath := PersistentSessions(c).Path() persistatePath := PersistentState(c).Path() @@ -87,8 +84,6 @@ func TestNodeSection(t *testing.T) { return false }) - require.Equal(t, true, relay) - require.Len(t, attributes, 2) require.Equal(t, "Price:11", attributes[0]) require.Equal(t, "UN-LOCODE:RU MSK", attributes[1]) diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go index 6ff1fe2ab..c8c967d30 100644 --- a/cmd/frostfs-node/config/object/config.go +++ b/cmd/frostfs-node/config/object/config.go @@ -21,10 +21,6 @@ const ( putSubsection = "put" getSubsection = "get" - - // PutPoolSizeDefault is a default value of routine pool size to - // process object.Put requests in object service. - PutPoolSizeDefault = 10 ) // Put returns structure that provides access to "put" subsection of @@ -35,30 +31,6 @@ func Put(c *config.Config) PutConfig { } } -// PoolSizeRemote returns the value of "remote_pool_size" config parameter. -// -// Returns PutPoolSizeDefault if the value is not a positive number. -func (g PutConfig) PoolSizeRemote() int { - v := config.Int(g.cfg, "remote_pool_size") - if v > 0 { - return int(v) - } - - return PutPoolSizeDefault -} - -// PoolSizeLocal returns the value of "local_pool_size" config parameter. -// -// Returns PutPoolSizeDefault if the value is not a positive number. -func (g PutConfig) PoolSizeLocal() int { - v := config.Int(g.cfg, "local_pool_size") - if v > 0 { - return int(v) - } - - return PutPoolSizeDefault -} - // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false“ if is not defined. 
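// Editor's note, not part of the patch: with PoolSizeRemote and
// PoolSizeLocal deleted above, the function below is the only remaining
// knob in the "put" subsection; worker-pool sizing now lives solely in
// replicator.pool_size (see replicator/config.go at the end of this
// patch).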
func (g PutConfig) SkipSessionTokenIssuerVerification() bool { return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification") diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go index e2bb105d9..1c525ef55 100644 --- a/cmd/frostfs-node/config/object/config_test.go +++ b/cmd/frostfs-node/config/object/config_test.go @@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote()) - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal()) require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty)) require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification()) }) @@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) { const path = "../../../../config/example/node" fileConfigTest := func(c *config.Config) { - require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote()) - require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal()) require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c)) require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification()) } diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go new file mode 100644 index 000000000..85f8180ed --- /dev/null +++ b/cmd/frostfs-node/config/qos/config.go @@ -0,0 +1,46 @@ +package qos + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +const ( + subsection = "qos" + criticalSubSection = "critical" + internalSubSection = "internal" +) + +// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. +func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys { + return authorizedKeys(c, criticalSubSection) +} + +// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. 
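+//
+// The configured keys are hex-encoded compressed public keys; the QoS
+// service compares their serialized form (PublicKey.Bytes()) with the
+// public key that signed the request, see AdjustIncomingTag later in
+// this patch.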
+func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
+	return authorizedKeys(c, internalSubSection)
+}
+
+func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
+	strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
+	pubs := make(keys.PublicKeys, 0, len(strKeys))
+
+	for i := range strKeys {
+		pub, err := keys.NewPublicKeyFromString(strKeys[i])
+		if err != nil {
+			panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
+		}
+
+		pubs = append(pubs, pub)
+	}
+
+	return pubs
+}
diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go
new file mode 100644
index 000000000..b3b6019cc
--- /dev/null
+++ b/cmd/frostfs-node/config/qos/config_test.go
@@ -0,0 +1,40 @@
+package qos
+
+import (
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/stretchr/testify/require"
+)
+
+func TestQoSSection(t *testing.T) {
+	t.Run("defaults", func(t *testing.T) {
+		empty := configtest.EmptyConfig()
+
+		require.Empty(t, CriticalAuthorizedKeys(empty))
+		require.Empty(t, InternalAuthorizedKeys(empty))
+	})
+
+	const path = "../../../../config/example/node"
+
+	criticalPubs := make(keys.PublicKeys, 2)
+	criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
+	criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
+
+	internalPubs := make(keys.PublicKeys, 2)
+	internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
+	internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
+
+	fileConfigTest := func(c *config.Config) {
+		require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
+		require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
+	}
+
+	configtest.ForEachFileType(path, fileConfigTest)
+
+	t.Run("ENV", func(t *testing.T) {
+		configtest.ForEnvFileType(t, path, fileConfigTest)
+	})
+}
diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go
index 0fbac935c..e954bf19d 100644
--- a/cmd/frostfs-node/config/replicator/config.go
+++ b/cmd/frostfs-node/config/replicator/config.go
@@ -11,6 +11,8 @@ const (
 
 	// PutTimeoutDefault is a default timeout of object put request in replicator.
 	PutTimeoutDefault = 5 * time.Second
+	// PoolSizeDefault is a default pool size for put requests in replicator.
+	PoolSizeDefault = 10
 )
 
 // PutTimeout returns the value of "put_timeout" config parameter
@@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration {
 
 // PoolSize returns the value of "pool_size" config parameter
 // from "replicator" section.
+//
+// Returns PoolSizeDefault if the value is a non-positive integer.
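+//
+// The same clamp-to-default idiom is used by NetmapCandidatesPollInterval
+// in the morph section of this patch: a non-positive configured value
+// falls back to the package default instead of being used verbatim.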
func PoolSize(c *config.Config) int { - return int(config.IntSafe(c.Sub(subsection), "pool_size")) + v := int(config.IntSafe(c.Sub(subsection), "pool_size")) + if v > 0 { + return v + } + + return PoolSizeDefault } diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go index 2129c01b4..2aa490946 100644 --- a/cmd/frostfs-node/config/replicator/config_test.go +++ b/cmd/frostfs-node/config/replicator/config_test.go @@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) { empty := configtest.EmptyConfig() require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty)) - require.Equal(t, 0, replicatorconfig.PoolSize(empty)) + require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty)) }) const path = "../../../../config/example/node" diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go new file mode 100644 index 000000000..e0efdfde2 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config.go @@ -0,0 +1,42 @@ +package rpcconfig + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" +) + +const ( + subsection = "rpc" + limitsSubsection = "limits" +) + +type LimitConfig struct { + Methods []string + MaxOps int64 +} + +// Limits returns the "limits" config from "rpc" section. +func Limits(c *config.Config) []LimitConfig { + c = c.Sub(subsection).Sub(limitsSubsection) + + var limits []LimitConfig + + for i := uint64(0); ; i++ { + si := strconv.FormatUint(i, 10) + sc := c.Sub(si) + + methods := config.StringSliceSafe(sc, "methods") + if len(methods) == 0 { + break + } + + if sc.Value("max_ops") == nil { + panic("no max operations for method group") + } + + limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) + } + + return limits +} diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go new file mode 100644 index 000000000..a6365e19f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config_test.go @@ -0,0 +1,77 @@ +package rpcconfig + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestRPCSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + require.Empty(t, Limits(configtest.EmptyConfig())) + }) + + t.Run("correct config", func(t *testing.T) { + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(1000)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + t.Run("no max operations", func(t *testing.T) { + const path = "testdata/no_max_ops" + + fileConfigTest := func(c *config.Config) { + require.Panics(t, func() { _ = Limits(c) }) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + 
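+	// Note the asymmetry between the subtest above and the one below:
+	// a missing max_ops makes Limits panic (its sc.Value("max_ops") == nil
+	// check), while an explicit max_ops of zero is preserved as a zero
+	// limit for the method group.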
t.Run("zero max operations", func(t *testing.T) { + const path = "testdata/zero_max_ops" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(0)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) +} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env new file mode 100644 index 000000000..2fed4c5bc --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env @@ -0,0 +1,3 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json new file mode 100644 index 000000000..6156aa71d --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json @@ -0,0 +1,18 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ] + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml new file mode 100644 index 000000000..e50b7ae93 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml @@ -0,0 +1,8 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env new file mode 100644 index 000000000..ce7302b0b --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env @@ -0,0 +1,4 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=0 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json new file mode 100644 index 000000000..16a1c173f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json @@ -0,0 +1,19 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 0 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml new file mode 100644 index 000000000..525d768d4 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml @@ -0,0 +1,9 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 0 + - 
methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 012012297..bdb280d87 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) { wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) - c.shared.cnrClient = wrap + c.cnrClient = wrap cnrSrc := cntClient.AsContainerSource(wrap) @@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) { frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } - c.shared.frostfsidClient = frostfsIDSubjectProvider + c.frostfsidClient = frostfsIDSubjectProvider c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( @@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) { service := containerService.NewSignService( &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, + newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, containerService.NewSplitterService( c.cfgContainer.containerBatchSize, c.respSvc, containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index ecd82bba5..1825013c7 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -7,9 +7,12 @@ import ( controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" + metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" + tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -50,7 +53,14 @@ func initControlService(ctx context.Context, c *cfg) { return } - c.cfgControlService.server = grpc.NewServer() + c.cfgControlService.server = grpc.NewServer( + grpc.ChainUnaryInterceptor( + qos.NewSetCriticalIOTagUnaryServerInterceptor(), + metrics.NewUnaryServerInterceptor(), + tracing.NewUnaryServerInterceptor(), + ), + // control service has no stream methods, so no stream interceptors added + ) c.onShutdown(func() { stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 6105be861..6b6d44750 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -4,14 +4,19 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net" "time" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" + rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" 
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -130,12 +135,16 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( + qos.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), grpc.ChainStreamInterceptor( + qos.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), } @@ -224,3 +233,54 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) } + +func initRPCLimiter(c *cfg) error { + var limits []limiting.KeyLimit + for _, l := range rpcconfig.Limits(c.appCfg) { + limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) + } + + if err := validateRPCLimits(c, limits); err != nil { + return fmt.Errorf("validate RPC limits: %w", err) + } + + limiter, err := limiting.NewSemaphoreLimiter(limits) + if err != nil { + return fmt.Errorf("create RPC limiter: %w", err) + } + + c.cfgGRPC.limiter.Store(limiter) + return nil +} + +func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error { + availableMethods := getAvailableMethods(c.cfgGRPC.servers) + for _, limit := range limits { + for _, method := range limit.Keys { + if _, ok := availableMethods[method]; !ok { + return fmt.Errorf("set limit on an unknown method %q", method) + } + } + } + return nil +} + +func getAvailableMethods(servers []grpcServer) map[string]struct{} { + res := make(map[string]struct{}) + for _, server := range servers { + for _, method := range getMethodsForServer(server.Server) { + res[method] = struct{}{} + } + } + return res +} + +func getMethodsForServer(server *grpc.Server) []string { + var res []string + for service, info := range server.GetServiceInfo() { + for _, method := range info.Methods { + res = append(res, fmt.Sprintf("/%s/%s", service, method.Name)) + } + } + return res +} diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index 3c15dc439..0228d2a10 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -101,6 +101,7 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) }) initAccessPolicyEngine(ctx, c) initAndLog(ctx, c, "access policy engine", func(c *cfg) { @@ -116,6 +117,8 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "apemanager", initAPEManagerService) initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) + initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) }) + initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index 19b4af51f..d9ca01e70 100644 --- a/cmd/frostfs-node/metrics.go +++ 
b/cmd/frostfs-node/metrics.go @@ -8,38 +8,38 @@ import ( func metricsComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.metrics == nil { - c.dynamicConfiguration.metrics = new(httpComponent) - c.dynamicConfiguration.metrics.cfg = c - c.dynamicConfiguration.metrics.name = "metrics" - c.dynamicConfiguration.metrics.handler = metrics.Handler() + if c.metrics == nil { + c.metrics = new(httpComponent) + c.metrics.cfg = c + c.metrics.name = "metrics" + c.metrics.handler = metrics.Handler() updated = true } // (re)init read configuration enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.metrics.enabled { - c.dynamicConfiguration.metrics.enabled = enabled + if enabled != c.metrics.enabled { + c.metrics.enabled = enabled updated = true } address := metricsconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.metrics.address { - c.dynamicConfiguration.metrics.address = address + if address != c.metrics.address { + c.metrics.address = address updated = true } dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.metrics.shutdownDur { - c.dynamicConfiguration.metrics.shutdownDur = dur + if dur != c.metrics.shutdownDur { + c.metrics.shutdownDur = dur updated = true } - return c.dynamicConfiguration.metrics, updated + return c.metrics, updated } func enableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Enable() + c.metricsSvc.Enable() } func disableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Disable() + c.metricsSvc.Disable() } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 657e22389..917cf6fc0 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -60,10 +61,11 @@ func (c *cfg) initMorphComponents(ctx context.Context) { } if c.cfgMorph.cacheTTL < 0 { - netmapSource = wrap + netmapSource = newRawNetmapStorage(wrap) } else { // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) + netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, + morphconfig.NetmapCandidatesPollInterval(c.appCfg)) } c.netMapSource = netmapSource @@ -83,7 +85,7 @@ func initMorphClient(ctx context.Context, c *cfg) { cli, err := client.New(ctx, c.key, client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log), + client.WithLogger(c.log.WithTag(logger.TagMorph)), client.WithMetrics(c.metricsCollector.MorphClientMetrics()), client.WithEndpoints(addresses...), client.WithConnLostCallback(func() { @@ -164,6 +166,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { err error subs subscriber.Subscriber ) + log := c.log.WithTag(logger.TagMorph) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { @@ -172,14 +175,14 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { } subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: c.log, + Log: log, StartFromBlock: fromSideChainBlock, Client: c.cfgMorph.client, }) fatalOnErr(err) lis, err := 
event.NewListener(event.ListenerParams{ - Logger: c.log, + Logger: log, Subscriber: subs, }) fatalOnErr(err) @@ -197,7 +200,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, + log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -208,11 +211,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { - c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, + log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 0e90e7707..7dfb4fe12 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,6 +8,7 @@ import ( "net" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -104,9 +105,7 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { v := s.nodeInfo.Load() if v != nil { res, ok = v.(netmapSDK.NodeInfo) - if !ok { - panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v)) - } + assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) } return @@ -124,7 +123,11 @@ func nodeKeyFromNetmap(c *cfg) []byte { func (c *cfg) iterateNetworkAddresses(f func(string) bool) { ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - ni.IterateNetworkEndpoints(f) + for s := range ni.NetworkEndpoints() { + if f(s) { + return + } + } } } @@ -184,7 +187,7 @@ func addNewEpochNotificationHandlers(c *cfg) { c.updateContractNodeInfo(ctx, e) - if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 + if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } @@ -206,14 +209,12 @@ func addNewEpochNotificationHandlers(c *cfg) { // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. 
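The morph hunks above introduce the tagged-logger pattern: a child logger is derived once per subsystem with WithTag, so the new logger.tags section can tune verbosity per subsystem without touching the global level (per-tag levels are wired up via loggerPrm.SetTags in the validation hunk later in this patch). A minimal sketch, assuming only the WithTag API and the Tag constants referenced by this patch; construction of the base logger is elided:

package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

// logWithTags derives per-subsystem children from one base logger.
func logWithTags(ctx context.Context, base *logger.Logger) {
	morphLog := base.WithTag(logger.TagMorph)  // used by the morph client and listener above
	treeLog := base.WithTag(logger.TagTreeSvc) // used by the tree service later in the patch

	morphLog.Debug(ctx, "emitted only if the morph tag permits debug")
	treeLog.Info(ctx, "tree service message")
}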
func bootstrapNode(ctx context.Context, c *cfg) { - if c.needBootstrap() { - if c.IsMaintenance() { - c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return - } - err := c.bootstrap(ctx) - fatalOnErrDetails("bootstrap error", err) + if c.IsMaintenance() { + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + return } + err := c.bootstrap(ctx) + fatalOnErrDetails("bootstrap error", err) } func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { @@ -349,8 +350,6 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { ) } -var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") - func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: @@ -362,10 +361,6 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro c.stopMaintenance(ctx) - if !c.needBootstrap() { - return errRelayBootstrap - } - if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) return bootstrapOnline(ctx, c) diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go new file mode 100644 index 000000000..e6be9cdf5 --- /dev/null +++ b/cmd/frostfs-node/netmap_source.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +type rawNetmapSource struct { + client *netmapClient.Client +} + +func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { + return &rawNetmapSource{ + client: client, + } +} + +func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMap(ctx, diff) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMapByEpoch(ctx, epoch) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { + return s.client.Epoch(ctx) +} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 77446b81c..c33c02b3f 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -16,7 +16,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" @@ -32,6 +31,7 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -168,16 +168,14 @@ func initObjectService(c *cfg) { sPatch := createPatchSvc(sGet, sPut) // build service pipeline - // grpc | audit | | signature | response | acl | ape | split + // grpc | audit | qos | | signature | response | acl | ape | split splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - apeSvc := createAPEService(c, splitSvc) - - aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) + apeSvc := createAPEService(c, &irFetcher, splitSvc) var commonSvc objectService.Common - commonSvc.Init(&c.internals, aclSvc) + commonSvc.Init(&c.internals, apeSvc) respSvc := objectService.NewResponseService( &commonSvc, @@ -189,9 +187,10 @@ func initObjectService(c *cfg) { respSvc, ) - c.shared.metricsSvc = objectService.NewMetricCollector( + c.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit) + qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) + auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { @@ -219,9 +218,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) - pol := policer.New( - policer.WithLogger(c.log), + policer.WithLogger(c.log.WithTag(logger.TagPolicer)), policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), policer.WithBuryFunc(buryFn), policer.WithContainerSource(c.cfgObject.cnrSource), @@ -283,7 +281,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl }) } -func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { +func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { return &innerRingFetcherWithNotary{ sidechain: c.cfgMorph.client, } @@ -293,7 +291,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( - replicator.WithLogger(c.log), + replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), replicator.WithPutTimeout( replicatorconfig.PutTimeout(c.appCfg), ), @@ -325,7 +323,6 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche c, c.cfgNetmap.state, irFetcher, - objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal), objectwriter.WithLogger(c.log), objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), ) @@ -351,7 +348,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav c.netMapSource, keyStorage, containerSource, - searchsvc.WithLogger(c.log), + searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), ) } @@ -377,7 +374,7 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ), coreConstructor, containerSource, - getsvc.WithLogger(c.log)) + 
getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) } func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -388,7 +385,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag c.netMapSource, c, c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log), + getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), ) } @@ -405,7 +402,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi cfg: c, }, keyStorage, - deletesvc.WithLogger(c.log), + deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), ) } @@ -429,28 +426,19 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi ) } -func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { - return v2.New( - apeSvc, - c.netMapSource, - irFetcher, - c.cfgObject.cnrSource, - v2.WithLogger(c.log), - ) -} - -func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { +func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.shared.frostfsidClient, + c.frostfsidClient, c.netMapSource, c.cfgNetmap.state, c.cfgObject.cnrSource, c.binPublicKey, ), + objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), splitSvc, ) } diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index 5b40c8a88..e4da8119f 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) { func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.pprof == nil { - c.dynamicConfiguration.pprof = new(httpComponent) - c.dynamicConfiguration.pprof.cfg = c - c.dynamicConfiguration.pprof.name = "pprof" - c.dynamicConfiguration.pprof.handler = httputil.Handler() - c.dynamicConfiguration.pprof.preReload = tuneProfilers + if c.pprof == nil { + c.pprof = new(httpComponent) + c.pprof.cfg = c + c.pprof.name = "pprof" + c.pprof.handler = httputil.Handler() + c.pprof.preReload = tuneProfilers updated = true } // (re)init read configuration enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.pprof.enabled { - c.dynamicConfiguration.pprof.enabled = enabled + if enabled != c.pprof.enabled { + c.pprof.enabled = enabled updated = true } address := profilerconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.pprof.address { - c.dynamicConfiguration.pprof.address = address + if address != c.pprof.address { + c.pprof.address = address updated = true } dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.pprof.shutdownDur { - c.dynamicConfiguration.pprof.shutdownDur = dur + if dur != c.pprof.shutdownDur { + c.pprof.shutdownDur = dur updated = true } - return c.dynamicConfiguration.pprof, updated + return c.pprof, updated } func tuneProfilers(c *cfg) { diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go new file mode 100644 index 000000000..6394b668b --- /dev/null +++ b/cmd/frostfs-node/qos.go @@ -0,0 +1,108 @@ +package main + +import ( + "bytes" + 
"context" + + qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "go.uber.org/zap" +) + +type cfgQoSService struct { + netmapSource netmap.Source + logger *logger.Logger + allowedCriticalPubs [][]byte + allowedInternalPubs [][]byte +} + +func initQoSService(c *cfg) { + criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) + internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg) + rawCriticalPubs := make([][]byte, 0, len(criticalPubs)) + rawInternalPubs := make([][]byte, 0, len(internalPubs)) + for i := range criticalPubs { + rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes()) + } + for i := range internalPubs { + rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) + } + + c.cfgQoSService = cfgQoSService{ + netmapSource: c.netMapSource, + logger: c.log, + allowedCriticalPubs: rawCriticalPubs, + allowedInternalPubs: rawInternalPubs, + } +} + +func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { + rawTag, defined := qosTagging.IOTagFromContext(ctx) + if !defined { + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) + } + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + ioTag, err := qos.FromRawString(rawTag) + if err != nil { + s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + + switch ioTag { + case qos.IOTagClient: + return ctx + case qos.IOTagCritical: + for _, pk := range s.allowedCriticalPubs { + if bytes.Equal(pk, requestSignPublicKey) { + return ctx + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), requestSignPublicKey) { + return ctx + } + } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + case qos.IOTagInternal: + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return ctx + } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + default: + s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } +} + +func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { + for _, pk := range s.allowedInternalPubs { + if bytes.Equal(pk, publicKey) { + return true + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return false + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), publicKey) { + return true + } + } + + return false +} diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go new file mode 100644 index 000000000..971f9eebf --- /dev/null +++ b/cmd/frostfs-node/qos_test.go @@ -0,0 
+1,226 @@ +package main + +import ( + "context" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestQoSService_Client(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag client defined", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = 
s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func TestQoSService_Internal(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) +} + +func TestQoSService_Critical(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) +} + +func TestQoSService_NetmapGetError(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + s.netmapSource = &utilTesting.TestNetmapSource{} + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, 
ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { + nmSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + reqSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedCritSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedIntSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + var node netmap.NodeInfo + node.SetPublicKey(nmSigner.PublicKey().Bytes()) + nm := &netmap.NetMap{} + nm.SetEpoch(100) + nm.SetNodes([]netmap.NodeInfo{node}) + + return &cfgQoSService{ + logger: test.NewLogger(t), + netmapSource: &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ + 100: nm, + }, + CurrentEpoch: 100, + }, + allowedCriticalPubs: [][]byte{ + allowedCritSigner.PublicKey().Bytes(), + }, + allowedInternalPubs: [][]byte{ + allowedIntSigner.PublicKey().Bytes(), + }, + }, + &testQoSServicePublicKeys{ + NetmapNode: nmSigner.PublicKey().Bytes(), + Request: reqSigner.PublicKey().Bytes(), + Internal: allowedIntSigner.PublicKey().Bytes(), + Critical: allowedCritSigner.PublicKey().Bytes(), + } +} + +type testQoSServicePublicKeys struct { + NetmapNode []byte + Request []byte + Internal []byte + Critical []byte +} diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index 2f3c9cbfe..fbfe3f5e6 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -55,7 +56,7 @@ func initSessionService(c *cfg) { server := sessionTransportGRPC.New( sessionSvc.NewSignService( &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log), + sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), ), ) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index f8330a25e..62af45389 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" "google.golang.org/grpc" @@ -51,12 +52,12 @@ func initTreeService(c *cfg) { c.treeService = tree.New( tree.WithContainerSource(cnrSource{ src: c.cfgObject.cnrSource, - cli: c.shared.cnrClient, + cli: c.cnrClient, }), - tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), + tree.WithFrostfsidSubjectProvider(c.frostfsidClient), 
tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log), + tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), tree.WithContainerCacheSize(treeConfig.CacheSize()), tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), @@ -72,7 +73,7 @@ func initTreeService(c *cfg) { ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - tree.RegisterTreeServiceServer(s, c.treeService) + tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) }) c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index ae52b9e4a..22d2e0aa9 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -30,6 +30,11 @@ func validateConfig(c *config.Config) error { return fmt.Errorf("invalid logger destination: %w", err) } + err = loggerPrm.SetTags(loggerconfig.Tags(c)) + if err != nil { + return fmt.Errorf("invalid list of allowed tags: %w", err) + } + // shard configuration validation shardNum := 0 diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index d9c0f167f..495365cf0 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -1,7 +1,6 @@ package main import ( - "os" "path/filepath" "testing" @@ -22,17 +21,4 @@ func TestValidate(t *testing.T) { require.NoError(t, err) }) }) - - t.Run("mainnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) - t.Run("testnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) } diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index b8acf0143..13f447af4 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -51,8 +51,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { } cmd.PrintErrln(err) - if cmd.PersistentPostRun != nil { - cmd.PersistentPostRun(cmd, nil) + for p := cmd; p != nil; p = p.Parent() { + if p.PersistentPostRun != nil { + p.PersistentPostRun(cmd, nil) + if !cobra.EnableTraverseRunHooks { + break + } + } } os.Exit(code) } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index f550552d2..5dd1a060e 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -27,15 +27,15 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState) - netmap.IterateNetworkEndpoints(node, func(endpoint string) { + for endpoint := range node.NetworkEndpoints() { cmd.Printf("%s ", endpoint) - }) + } cmd.Println() if !short { - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { cmd.Printf("%s\t%s: %s\n", indent, key, value) - }) + } } } diff --git a/config/example/ir.env b/config/example/ir.env index ebd91c243..c13044a6e 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -1,5 +1,7 @@ FROSTFS_IR_LOGGER_LEVEL=info FROSTFS_IR_LOGGER_TIMESTAMP=true 
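+# The indexed TAGS_<i>_NAMES / TAGS_<i>_LEVEL keys below correspond to the
+# logger.tags[<i>].names / .level entries in ir.yaml and are parsed the same
+# way as the node's loggerconfig.Tags shown earlier in this patch: indices
+# must be contiguous from 0, since parsing stops at the first index without
+# names.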
+FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_IR_WALLET_PATH=/path/to/wallet.json FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX diff --git a/config/example/ir.yaml b/config/example/ir.yaml index 49f9fd324..ed53f014b 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -3,6 +3,9 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" timestamp: true + tags: + - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. + level: debug wallet: path: /path/to/wallet.json # Path to NEP-6 NEO wallet file diff --git a/config/example/node.env b/config/example/node.env index b2a0633a9..9a2426358 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -1,6 +1,8 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_LOGGER_DESTINATION=journald FROSTFS_LOGGER_TIMESTAMP=true +FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_PPROF_ENABLED=true FROSTFS_PPROF_ADDRESS=localhost:6060 @@ -20,9 +22,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" FROSTFS_NODE_ATTRIBUTE_0=Price:11 FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" -FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state +FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db # Tree service section FROSTFS_TREE_ENABLED=true @@ -87,14 +89,16 @@ FROSTFS_REPLICATOR_POOL_SIZE=10 FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 # Object service section -FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 -FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=1000 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 + # Storage engine section -FROSTFS_STORAGE_SHARD_POOL_SIZE=15 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 ## 0 shard ### Flag to refill Metabase from BlobStor @@ -119,7 +123,8 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESS=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 @@ -154,6 +159,54 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500 #### Limit of concurrent workers collecting expired objects by the garbage collector FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15 +#### Limits config +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s 
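+#### Per-tag scheduling, one READ_TAGS_<i> / WRITE_TAGS_<i> block per IO tag:
+#### TAG names the tag, WEIGHT is its relative scheduling share, LIMIT_OPS
+#### and RESERVED_OPS cap and reserve operations for it, and PROHIBITED=true
+#### rejects requests carrying that tag on this shard.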
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 ## 1 shard ### Flag to refill Metabase from BlobStor @@ -225,3 +278,6 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" FROSTFS_MULTINET_BALANCER=roundrobin FROSTFS_MULTINET_RESTRICT=false FROSTFS_MULTINET_FALLBACK_DELAY=350ms + +FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" +FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" diff --git a/config/example/node.json b/config/example/node.json index f3192ac2f..6b7a9c2c6 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -2,7 +2,13 @@ "logger": { "level": "debug", "destination": "journald", - "timestamp": true + "timestamp": true, + "tags": [ + { + "names": "main, morph", + "level": "debug" + } + ] }, "pprof": { "enabled": true, @@ -31,13 +37,13 @@ ], "attribute_0": "Price:11", "attribute_1": "UN-LOCODE:RU MSK", - "relay": true, "persistent_sessions": { "path": "/sessions" }, "persistent_state": { "path": "/state" - } + }, + "locode_db_path": 
"/path/to/locode/db" }, "grpc": { "0": { @@ -134,16 +140,30 @@ "tombstone_lifetime": 10 }, "put": { - "remote_pool_size": 100, - "local_pool_size": 200, "skip_session_token_issuer_verification": true }, "get": { "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] } }, + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 1000 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + }, "storage": { - "shard_pool_size": 15, "shard_ro_error_threshold": 100, "shard": { "0": { @@ -168,12 +188,15 @@ "max_batch_size": 100, "max_batch_delay": "10ms" }, - "compress": true, - "compression_exclude_content_types": [ - "audio/*", "video/*" - ], - "compression_estimate_compressibility": true, - "compression_estimate_compressibility_threshold": 0.7, + "compression": { + "enabled": true, + "level": "fastest", + "exclude_content_types": [ + "audio/*", "video/*" + ], + "estimate_compressibility": true, + "estimate_compressibility_threshold": 0.7 + }, "small_object_size": 102400, "blobstor": [ { @@ -206,6 +229,87 @@ "remover_sleep_interval": "2m", "expired_collector_batch_size": 1500, "expired_collector_worker_count": 15 + }, + "limits": { + "read": { + "max_running_ops": 10000, + "max_waiting_ops": 1000, + "idle_timeout": "30s", + "tags": [ + { + "tag": "internal", + "weight": 20, + "limit_ops": 0, + "reserved_ops": 1000 + }, + { + "tag": "client", + "weight": 70, + "reserved_ops": 10000 + }, + { + "tag": "background", + "weight": 5, + "limit_ops": 10000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 5, + "limit_ops": 25000 + }, + { + "tag": "policer", + "weight": 5, + "limit_ops": 25000, + "prohibited": true + }, + { + "tag": "treesync", + "weight": 5, + "limit_ops": 25 + } + ] + }, + "write": { + "max_running_ops": 1000, + "max_waiting_ops": 100, + "idle_timeout": "45s", + "tags": [ + { + "tag": "internal", + "weight": 200, + "limit_ops": 0, + "reserved_ops": 100 + }, + { + "tag": "client", + "weight": 700, + "reserved_ops": 1000 + }, + { + "tag": "background", + "weight": 50, + "limit_ops": 1000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "policer", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "treesync", + "weight": 50, + "limit_ops": 100 + } + ] + } } }, "1": { @@ -226,7 +330,9 @@ "max_batch_size": 200, "max_batch_delay": "20ms" }, - "compress": false, + "compression": { + "enabled": false + }, "small_object_size": 102400, "blobstor": [ { @@ -305,5 +411,19 @@ "balancer": "roundrobin", "restrict": false, "fallback_delay": "350ms" + }, + "qos": { + "critical": { + "authorized_keys": [ + "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", + "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" + ] + }, + "internal": { + "authorized_keys": [ + "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", + "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" + ] + } } } diff --git a/config/example/node.yaml b/config/example/node.yaml index c5acf5386..2d4bc90fb 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -2,6 +2,9 @@ logger: level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" destination: journald # logger destination: one of "stdout" (default), "journald" timestamp: true + tags: + - names: "main, morph" + level: debug systemdnotify: 
enabled: true @@ -31,11 +34,11 @@ node: - grpcs://localhost:8083 attribute_0: "Price:11" attribute_1: UN-LOCODE:RU MSK - relay: true # start Storage node in relay mode without bootstrapping into the Network map persistent_sessions: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: path: /state # path to persistent state file of Storage node + "locode_db_path": "/path/to/locode/db" grpc: - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server @@ -95,6 +98,9 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 ape_chain_cache_size: 100000 + netmap: + candidates: + poll_interval: 20s apiclient: dial_timeout: 15s # timeout for FrostFS API client connection @@ -117,17 +123,23 @@ object: delete: tombstone_lifetime: 10 # tombstone "local" lifetime in epochs put: - remote_pool_size: 100 # number of async workers for remote PUT operations - local_pool_size: 200 # number of async workers for local PUT operations skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true get: priority: # list of metrics of nodes for prioritization - $attribute:ClusterName - $attribute:UN-LOCODE +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 + storage: - # note: shard configuration can be omitted for relay node (see `node.relay`) - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard: @@ -141,7 +153,7 @@ storage: flush_worker_count: 30 # number of write-cache flusher threads metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) + perm: 0o644 # permissions for metabase files(directories: +x for current user and group) max_batch_size: 200 max_batch_delay: 20ms @@ -149,18 +161,19 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compress: false # turn on/off zstd(level 3) compression of stored objects + compression: + enabled: false # turn on/off zstd compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes blobstor: - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 1 # max depth of object tree storage in key-value DB width: 4 # max width of object tree storage in key-value DB opened_cache_capacity: 50 # maximum number of opened database files opened_cache_ttl: 5m # ttl for opened database file opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 5 # max depth of object tree storage in FS gc: @@ -191,12 +204,14 @@ storage: max_batch_size: 100 max_batch_delay: 10ms - compress: true # turn on/off zstd(level 3) compression of stored objects - compression_exclude_content_types: - - 
audio/* - - video/* - compression_estimate_compressibility: true - compression_estimate_compressibility_threshold: 0.7 + compression: + enabled: true # turn on/off zstd compression of stored objects + level: fastest + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 blobstor: - type: blobovnicza @@ -219,6 +234,59 @@ storage: expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector + limits: + read: + max_running_ops: 10000 + max_waiting_ops: 1000 + idle_timeout: 30s + tags: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + prohibited: true + - tag: treesync + weight: 5 + limit_ops: 25 + write: + max_running_ops: 1000 + max_waiting_ops: 100 + idle_timeout: 45s + tags: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 + - tag: treesync + weight: 50 + limit_ops: 100 + 1: writecache: path: tmp/1/cache # write-cache root directory @@ -237,7 +305,7 @@ storage: pilorama: path: tmp/1/blob/pilorama.db no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. - perm: 0644 # permission to use for the database file and intermediate directories + perm: 0o644 # permission to use for the database file and intermediate directories tracing: enabled: true @@ -270,3 +338,13 @@ multinet: balancer: roundrobin restrict: false fallback_delay: 350ms + +qos: + critical: + authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag + - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 + - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a diff --git a/config/mainnet/README.md b/config/mainnet/README.md deleted file mode 100644 index 717a9b0ff..000000000 --- a/config/mainnet/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# N3 Mainnet Storage node configuration - -Here is a template for simple storage node configuration in N3 Mainnet. -Make sure to specify correct values instead of `<...>` placeholders. -Do not change `contracts` section. Run the latest frostfs-node release with -the fixed config `frostfs-node -c config.yml` - -To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract. -The contract sript hash is `2cafa46838e8b564468ebd868dcafdd99dce6221` -(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`) - -## Tips - -Use `grpcs://` scheme in the announced address if you enable TLS in grpc server. 
-```yaml -node: - addresses: - - grpcs://frostfs.my.org:8080 - -grpc: - num: 1 - 0: - endpoint: frostfs.my.org:8080 - tls: - enabled: true - certificate: /path/to/cert - key: /path/to/key -``` diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml deleted file mode 100644 index d86ea451f..000000000 --- a/config/mainnet/config.yml +++ /dev/null @@ -1,70 +0,0 @@ -node: - wallet: - path: - address: - password: - addresses: - - - attribute_0: UN-LOCODE: - attribute_1: Price:100000 - attribute_2: User-Agent:FrostFS\/0.9999 - -grpc: - num: 1 - 0: - endpoint: - tls: - enabled: false - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/path/metabase - perm: 0600 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m - -logger: - level: info - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -object: - put: - remote_pool_size: 100 - local_pool_size: 100 - -morph: - rpc_endpoint: - - wss://rpc1.morph.frostfs.info:40341/ws - - wss://rpc2.morph.frostfs.info:40341/ws - - wss://rpc3.morph.frostfs.info:40341/ws - - wss://rpc4.morph.frostfs.info:40341/ws - - wss://rpc5.morph.frostfs.info:40341/ws - - wss://rpc6.morph.frostfs.info:40341/ws - - wss://rpc7.morph.frostfs.info:40341/ws - dial_timeout: 20s - -contracts: - balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 - container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 - netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 diff --git a/config/testnet/README.md b/config/testnet/README.md deleted file mode 100644 index e2cda33ec..000000000 --- a/config/testnet/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# N3 Testnet Storage node configuration - -There is a prepared configuration for NeoFS Storage Node deployment in -N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared -docker image and run it with docker-compose. - -## Build image - -Prepared **frostfs-storage-testnet** image is available at Docker Hub. -However, if you need to rebuild it for some reason, run -`make image-storage-testnet` command. - -``` -$ make image-storage-testnet -... -Successfully built ab0557117b02 -Successfully tagged nspccdev/neofs-storage-testnet:0.25.1 -``` - -## Deploy node - -To run a storage node in N3 Testnet environment, you should deposit GAS assets, -update docker-compose file and start the node. - -### Deposit - -The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a -bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx. - -First, obtain GAS in N3 Testnet chain. You can do that with -[faucet](https://neowish.ngd.network) service. - -Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet. -You can provide scripthash in the `data` argument of transfer tx to make a -deposit to a specified account. Otherwise, deposit is made to the tx sender. - -NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`, -so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`. - -See a deposit example with `neo-go`. 
- -``` -neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \ ---from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \ ---to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \ ---token GAS \ ---amount 1 -``` - -### Configure - -Next, configure `node_config.env` file. Change endpoints values. Both -should contain your **public** IP. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -``` - -Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory) -attribute. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED -``` - -You can validate UN/LOCODE attribute in -[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0) -with frostfs-cli. - -``` -$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED' -Country: Russia -Location: Saint Petersburg (ex Leningrad) -Continent: Europe -Subdivision: [SPE] Sankt-Peterburg -Coordinates: 59.53, 30.15 -``` - -It is recommended to pass the node's key as a file. To do so, convert your wallet -WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file. - -``` -// Print WIF in a 32-byte hex format -$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56 -PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059 -WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ -ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc -ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf - -// Save 32-byte hex into a file -$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key -``` - -Then, specify the path to this file in `docker-compose.yml` -```yaml - volumes: - - frostfs_storage:/storage - - ./my_wallet.key:/node.key -``` - - -NeoFS objects will be stored on your machine. By default, docker-compose -is configured to store objects in named docker volume `frostfs_storage`. You can -specify a directory on the filesystem to store objects there. - -```yaml - volumes: - - /home/username/frostfs/rc3/storage:/storage - - ./my_wallet.key:/node.key -``` - -### Start - -Run the node with `docker-compose up` command and stop it with `docker-compose down`. - -### Debug - -To print node logs, use `docker logs frostfs-testnet`. 
To print debug messages in -log, set up log level to debug with this env: - -```yaml - environment: - - NEOFS_LOGGER_LEVEL=debug -``` diff --git a/config/testnet/config.yml b/config/testnet/config.yml deleted file mode 100644 index 76b36cdf6..000000000 --- a/config/testnet/config.yml +++ /dev/null @@ -1,52 +0,0 @@ -logger: - level: info - -morph: - rpc_endpoint: - - wss://rpc01.morph.testnet.frostfs.info:51331/ws - - wss://rpc02.morph.testnet.frostfs.info:51331/ws - - wss://rpc03.morph.testnet.frostfs.info:51331/ws - - wss://rpc04.morph.testnet.frostfs.info:51331/ws - - wss://rpc05.morph.testnet.frostfs.info:51331/ws - - wss://rpc06.morph.testnet.frostfs.info:51331/ws - - wss://rpc07.morph.testnet.frostfs.info:51331/ws - dial_timeout: 20s - -contracts: - balance: e0420c216003747626670d1424569c17c79015bf - container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 - netmap: d4b331639799e2958d4bc5b711b469d79de94e01 - -node: - key: /node.key - attribute_0: Deployed:SelfHosted - attribute_1: User-Agent:FrostFS\/0.9999 - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/metabase - perm: 0777 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m diff --git a/docs/shard-modes.md b/docs/shard-modes.md index 3b459335b..6cc4ab13c 100644 --- a/docs/shard-modes.md +++ b/docs/shard-modes.md @@ -51,10 +51,7 @@ However, all mode changing operations are idempotent. ## Automatic mode changes -Shard can automatically switch to a `degraded-read-only` mode in 3 cases: -1. If the metabase was not available or couldn't be opened/initialized during shard startup. -2. If shard error counter exceeds threshold. -3. If the metabase couldn't be reopened during SIGHUP handling. +A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold. 
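As a rough illustration of the remaining trigger — hypothetical names, not the engine's actual code — the documented behavior reduces to an error counter that trips a one-way switch once it crosses the configured `shard_ro_error_threshold`:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// shard models only the pieces relevant here: an error counter,
// the configured threshold, and a read-only flag.
type shard struct {
	errCount  atomic.Uint32
	threshold uint32 // shard_ro_error_threshold; 0 means "ignore errors"
	readOnly  atomic.Bool
}

// reportError bumps the counter and switches the shard to read-only
// exactly once when the threshold is exceeded.
func (s *shard) reportError() {
	n := s.errCount.Add(1)
	if s.threshold > 0 && n >= s.threshold && s.readOnly.CompareAndSwap(false, true) {
		fmt.Printf("shard switched to read-only after %d errors\n", n)
	}
}

func main() {
	s := &shard{threshold: 3}
	for i := 0; i < 4; i++ {
		s.reportError()
	}
}
```

The `CompareAndSwap` guard makes the mode change fire only once under concurrent error reports, in line with the idempotency of mode-changing operations noted above.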
# Detach shard diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 98d72cb69..da9fdfed0 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -12,21 +12,23 @@ There are some custom types used for brevity: # Structure -| Section | Description | -|------------------------|---------------------------------------------------------------------| -| `logger` | [Logging parameters](#logger-section) | -| `pprof` | [PProf configuration](#pprof-section) | -| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | -| `control` | [Control service configuration](#control-section) | -| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | -| `morph` | [N3 blockchain client configuration](#morph-section) | -| `apiclient` | [FrostFS API client configuration](#apiclient-section) | -| `policer` | [Policer service configuration](#policer-section) | -| `replicator` | [Replicator service configuration](#replicator-section) | -| `storage` | [Storage engine configuration](#storage-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | -| `multinet` | [Multinet configuration](#multinet-section) | +| Section | Description | +|--------------|---------------------------------------------------------| +| `node` | [Node parameters](#node-section) | +| `logger` | [Logging parameters](#logger-section) | +| `pprof` | [PProf configuration](#pprof-section) | +| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | +| `control` | [Control service configuration](#control-section) | +| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | +| `morph` | [N3 blockchain client configuration](#morph-section) | +| `apiclient` | [FrostFS API client configuration](#apiclient-section) | +| `policer` | [Policer service configuration](#policer-section) | +| `replicator` | [Replicator service configuration](#replicator-section) | +| `storage` | [Storage engine configuration](#storage-section) | +| `runtime` | [Runtime configuration](#runtime-section) | +| `audit` | [Audit configuration](#audit-section) | +| `multinet` | [Multinet configuration](#multinet-section) | +| `qos` | [QoS configuration](#qos-section) | # `control` section ```yaml @@ -110,11 +112,21 @@ Contains logger parameters. ```yaml logger: level: info + tags: + - names: "main, morph" + level: debug ``` -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|---------------------------------------------------------------------------------------------------| -| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| Parameter | Type | Default value | Description | +|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------| +| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tag descriptions. | + +## `tags` subsection +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | +| `level` | `string` | | Logging level for the components from `names`; overrides the default logging level. | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract @@ -147,15 +159,19 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 switch_interval: 2m + netmap: + candidates: + poll_interval: 20s ``` -| Parameter | Type | Default value | Description | - | ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | -| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | -| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | -| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | -| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| Parameter | Type | Default value | Description | +|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | +| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | +| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | +| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | +| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval for polling the list of netmap candidates, which is merged with the netmap kept in the local cache. | ## `rpc_endpoint` subsection | Parameter | Type | Default value | Description | @@ -169,7 +185,6 @@ Local storage engine configuration. | Parameter | Type | Default value | Description | |----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------| -| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. | | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. | | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. | | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. | @@ -180,20 +195,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting `default` subsection has the same format and specifies defaults for missing values. The following table describes configuration for each shard. -| Parameter | Type | Default value | Description | | ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `compress` | `bool` | `false` | Flag to enable compression. | -| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | -| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | -| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | -| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | -| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | -| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | -| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | -| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | -| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | -| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | -| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| Parameter | Type | Default value | Description | +| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- | +| `compression` | [Compression config](#compression-subsection) | | Compression config. | +| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | +| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | +| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | +| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | +| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | +| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | +| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | +| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | + +### `compression` subsection + +Contains compression config. + +```yaml +compression: + enabled: true + level: smallest_size + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 +``` + +| Parameter | Type | Default value | Description | +| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | `bool` | `false` | Flag to enable compression. | +| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | +| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | +| `estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data. | +| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. | ### `blobstor` subsection @@ -208,7 +244,7 @@ blobstor: width: 4 - type: fstree path: /path/to/blobstor/blobovnicza - perm: 0644 + perm: 0o644 size: 4194304 depth: 1 width: 4 @@ -268,7 +304,7 @@ gc: ```yaml metabase: path: /path/to/meta.db - perm: 0644 + perm: 0o644 max_batch_size: 200 max_batch_delay: 20ms @@ -300,6 +336,65 @@ writecache: | `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. | | `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. 
| +### `limits` subsection + +```yaml +limits: + read: + max_running_ops: 10000 + max_waiting_ops: 1000 + tags: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + write: + max_running_ops: 1000 + max_waiting_ops: 100 + tags: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 +``` + +| Parameter | Type | Default value | Description | +| ------------------------ | -------- | -------------- | ------------------------------------------------------------------------------------------------------------------ | +| `read.max_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. | +| `read.max_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. | +| `write.max_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. | +| `write.max_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. | +| `read.tags` | `[]tag` | empty | Array of shard read settings for tags. | +| `write.tags` | `[]tag` | empty | Array of shard write settings for tags. | +| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`, `treesync`. | +| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. | +| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | +| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | +| `tag.prohibited` | `bool` | false | If `true`, operations with the specified tag are prohibited. | # `node` section @@ -315,22 +410,22 @@ node: - "Price:11" - "UN-LOCODE:RU MSK" - "key:value" - relay: false persistent_sessions: path: /sessions persistent_state: path: /state + locode_db_path: "/path/to/locode/db" ``` -| Parameter | Type | Default value | Description | |-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------| -| `key` | `string` | | Path to the binary-encoded private key. | -| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | -| `addresses` | `[]string` | | Addresses advertised in the netmap. | -| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `relay` | `bool` | | Enable relay mode. | -| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | -| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. 
| +| Parameter | Type | Default value | Description | +|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------| +| `key` | `string` | | Path to the binary-encoded private key. | +| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | +| `addresses` | `[]string` | | Addresses advertised in the netmap. | +| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | +| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | +| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | +| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | ## `wallet` subsection N3 wallet configuration. @@ -395,18 +490,16 @@ replicator: pool_size: 10 ``` -| Parameter | Type | Default value | Description | -|---------------|------------|----------------------------------------|---------------------------------------------| -| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | -| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. | +| Parameter | Type | Default value | Description | +|---------------|------------|---------------|---------------------------------------------| +| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | +| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. | # `object` section Contains object-service related parameters. ```yaml object: - put: - remote_pool_size: 100 get: priority: - $attribute:ClusterName @@ -415,10 +508,29 @@ object: | Parameter | Type | Default value | Description | |-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------| | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | -| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | -| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | | `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. | + +# `rpc` section +Contains limits on the number of active RPCs for the specified method(s). + +```yaml +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 +``` + +| Parameter | Type | Default value | Description | +|------------------|------------|---------------|----------------------------------------------------------------| +| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s). | +| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit. | + +# `runtime` section Contains runtime parameters. 
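As a toy illustration of the `rpc.limits` semantics documented above — not the node's actual implementation, and every name below is invented — each list entry can be thought of as one semaphore of capacity `max_ops` shared by all methods in that entry:

```go
package main

import (
	"errors"
	"fmt"
)

var errLimitReached = errors.New("active RPC limit reached")

// methodLimit mirrors one entry of the `rpc.limits` list.
type methodLimit struct {
	methods []string
	maxOps  int
}

// limiter maps each listed method to a semaphore shared by its group,
// so methods from one entry also share one max_ops budget.
type limiter struct {
	sem map[string]chan struct{}
}

func newLimiter(limits []methodLimit) *limiter {
	l := &limiter{sem: make(map[string]chan struct{})}
	for _, lim := range limits {
		s := make(chan struct{}, lim.maxOps)
		for _, m := range lim.methods {
			l.sem[m] = s
		}
	}
	return l
}

// acquire fails fast when max_ops calls are already in flight; the caller
// must invoke the returned release function when the RPC finishes.
func (l *limiter) acquire(method string) (release func(), err error) {
	s, ok := l.sem[method]
	if !ok {
		return func() {}, nil // method is not limited
	}
	select {
	case s <- struct{}{}:
		return func() { <-s }, nil
	default:
		return nil, errLimitReached
	}
}

func main() {
	l := newLimiter([]methodLimit{
		{methods: []string{"/neo.fs.v2.object.ObjectService/Get"}, maxOps: 1},
	})
	release, _ := l.acquire("/neo.fs.v2.object.ObjectService/Get")
	if _, err := l.acquire("/neo.fs.v2.object.ObjectService/Get"); err != nil {
		fmt.Println(err) // active RPC limit reached
	}
	release()
}
```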
@@ -471,3 +583,20 @@ multinet: | `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | | `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | | `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | + +# `qos` section +```yaml +qos: + critical: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 +``` +| Parameter | Type | Default value | Description | +| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- | +| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. | +| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. | diff --git a/go.mod b/go.mod index cc6b0a202..6f1950936 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,18 @@ module git.frostfs.info/TrueCloudLab/frostfs-node -go 1.22 +go 1.23.0 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 + git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 @@ -27,7 +28,7 @@ require ( github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.14.0 + github.com/multiformats/go-multiaddr v0.15.0 github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 @@ -43,10 +44,9 @@ require ( go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 - golang.org/x/term v0.27.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 + golang.org/x/term v0.30.0 google.golang.org/grpc v1.69.2 google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 @@ -85,9 +85,9 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect 
github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -123,13 +123,14 @@ require ( go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/text v0.23.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index eae467b31..5b075f60a 100644 --- a/go.sum +++ b/go.sum @@ -1,23 +1,25 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 h1:pP19IawSdsLCKFv7HMNfWAeH6E3uSnntKZkwka+/2+4= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod 
h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= @@ -143,14 +145,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 
github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -190,8 +192,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= -github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= +github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= +github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -322,15 +324,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -351,8 +353,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -379,16 +381,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -396,15 +398,15 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -443,7 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/internal/assert/cond.go b/internal/assert/cond.go new file mode 100644 index 000000000..113d2eba9 --- /dev/null +++ b/internal/assert/cond.go @@ -0,0 +1,29 @@ +package assert + +import ( + "fmt" + "strings" +) + +func True(cond bool, details ...string) { + if !cond { + panic(strings.Join(details, " ")) + } +} + +func False(cond bool, details ...string) { + if cond { + panic(strings.Join(details, " ")) + } +} + +func NoError(err error, details ...string) { + if err != nil { + content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " ")) + panic(content) + } +} + +func Fail(details ...string) { + panic(strings.Join(details, " ")) +} diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 0610dc175..626372f43 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -125,7 +125,6 @@ const ( SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" SearchLocalOperationFailed = "local operation failed" UtilObjectServiceError = "object service error" - UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" @@ -199,6 +198,7 @@ const ( EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" EngineInterruptGettingLockers = "can't get object's lockers" EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" + EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones" EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" 
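The new internal/assert helpers above centralize invariant checks that would otherwise be bare panic calls. A minimal usage sketch (the surrounding variables are hypothetical, not part of this change):

	// Guards an invariant; panics with the joined details on violation.
	assert.True(shardInitialized, "shard must be initialized before use")
	// Panics with a "BUG: ..." message if err is non-nil.
	assert.NoError(err, "open metabase")
	// Marks a branch that must be unreachable.
	assert.Fail("object neither stored nor rejected")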
EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" @@ -253,6 +253,7 @@ const ( ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" + ShardCouldNotFindObject = "could not find object" WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" @@ -510,4 +511,11 @@ const ( BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" WritecacheCantGetObject = "can't get an object from fstree" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" + FailedToParseIncomingIOTag = "failed to parse incoming IO tag" + NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" + FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" + FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" + WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" + FailedToUpdateNetmapCandidates = "update netmap candidates failed" + UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used" ) diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go index cb165de69..9123541ff 100644 --- a/internal/metrics/consts.go +++ b/internal/metrics/consts.go @@ -23,6 +23,7 @@ const ( policerSubsystem = "policer" commonCacheSubsystem = "common_cache" multinetSubsystem = "multinet" + qosSubsystem = "qos" successLabel = "success" shardIDLabel = "shard_id" @@ -43,6 +44,7 @@ const ( hitLabel = "hit" cacheLabel = "cache" sourceIPLabel = "source_ip" + ioTagLabel = "io_tag" readWriteMode = "READ_WRITE" readOnlyMode = "READ_ONLY" diff --git a/internal/metrics/node.go b/internal/metrics/node.go index 4ea3c7c24..8ade19eb2 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -26,6 +26,7 @@ type NodeMetrics struct { morphCache *morphCacheMetrics log logger.LogMetrics multinet *multinetMetrics + qos *QoSMetrics // nolint: unused appInfo *ApplicationInfo } @@ -55,6 +56,7 @@ func NewNodeMetrics() *NodeMetrics { log: logger.NewLogMetrics(namespace), appInfo: NewApplicationInfo(misc.Version), multinet: newMultinetMetrics(namespace), + qos: newQoSMetrics(), } } @@ -126,3 +128,7 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics { func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { return m.multinet } + +func (m *NodeMetrics) QoSMetrics() *QoSMetrics { + return m.qos +} diff --git a/internal/metrics/object.go b/internal/metrics/object.go index 0ba994ed3..e4f6dfde1 100644 --- a/internal/metrics/object.go +++ b/internal/metrics/object.go @@ -9,13 +9,14 @@ import ( ) type ObjectServiceMetrics interface { - AddRequestDuration(method string, d time.Duration, success bool) + AddRequestDuration(method string, d time.Duration, success bool, ioTag string) AddPayloadSize(method string, size int) } type objectServiceMetrics struct { - methodDuration *prometheus.HistogramVec - payloadCounter *prometheus.CounterVec + methodDuration *prometheus.HistogramVec + payloadCounter *prometheus.CounterVec + 
ioTagOpsCounter *prometheus.CounterVec } func newObjectServiceMetrics() *objectServiceMetrics { @@ -32,14 +33,24 @@ Name: "request_payload_bytes", Help: "Object Service request payload", }, []string{methodLabel}), + ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: objectSubsystem, + Name: "requests_total", + Help: "Count of requests for each IO tag", + }, []string{methodLabel, ioTagLabel}), } } -func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) { +func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) { m.methodDuration.With(prometheus.Labels{ methodLabel: method, successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) + m.ioTagOpsCounter.With(prometheus.Labels{ + ioTagLabel: ioTag, + methodLabel: method, + }).Inc() } func (m *objectServiceMetrics) AddPayloadSize(method string, size int) { diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go new file mode 100644 index 000000000..be6878142 --- /dev/null +++ b/internal/metrics/qos.go @@ -0,0 +1,52 @@ +package metrics + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type QoSMetrics struct { + opsCounter *prometheus.GaugeVec +} + +func newQoSMetrics() *QoSMetrics { + return &QoSMetrics{ + opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: qosSubsystem, + Name: "operations_total", + Help: "Count of pending, in-progress, completed and resource-exhausted operations for each shard", + }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), + } +} + +func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) { + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "pending", + }).Set(float64(pending)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "in_progress", + }).Set(float64(inProgress)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "completed", + }).Set(float64(completed)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "resource_exhausted", + }).Set(float64(resourceExhausted)) +} + +func (m *QoSMetrics) Close(shardID string) { + m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) +} diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go index 6702aa83c..e192c4398 100644 --- a/internal/metrics/treeservice.go +++ b/internal/metrics/treeservice.go @@ -12,12 +12,14 @@ type TreeMetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type treeServiceMetrics struct { replicateTaskDuration *prometheus.HistogramVec replicateWaitDuration *prometheus.HistogramVec syncOpDuration *prometheus.HistogramVec + ioTagOpsCounter *prometheus.CounterVec } var _ TreeMetricsRegister = (*treeServiceMetrics)(nil) @@ -42,6 +44,12 @@ func newTreeServiceMetrics() *treeServiceMetrics { Name: "sync_duration_seconds", Help: "Duration of
synchronization operations", }, []string{successLabel}), + ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: treeServiceSubsystem, + Name: "requests_total", + Help: "Count of requests for each IO tag", + }, []string{methodLabel, ioTagLabel}), } } @@ -62,3 +70,10 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) { successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) } + +func (m *treeServiceMetrics) AddOperation(op string, ioTag string) { + m.ioTagOpsCounter.With(prometheus.Labels{ + ioTagLabel: ioTag, + methodLabel: op, + }).Inc() +} diff --git a/internal/qos/config.go b/internal/qos/config.go new file mode 100644 index 000000000..d90b403b5 --- /dev/null +++ b/internal/qos/config.go @@ -0,0 +1,31 @@ +package qos + +import ( + "math" + "time" +) + +const ( + NoLimit int64 = math.MaxInt64 + DefaultIdleTimeout = 5 * time.Minute +) + +type LimiterConfig struct { + Read OpConfig + Write OpConfig +} + +type OpConfig struct { + MaxWaitingOps int64 + MaxRunningOps int64 + IdleTimeout time.Duration + Tags []IOTagConfig +} + +type IOTagConfig struct { + Tag string + Weight *float64 + LimitOps *float64 + ReservedOps *float64 + Prohibited bool +} diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go new file mode 100644 index 000000000..58cd9e52c --- /dev/null +++ b/internal/qos/grpc.go @@ -0,0 +1,86 @@ +package qos + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "google.golang.org/grpc" +) + +func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String()) + return handler(ctx, req) + } +} + +func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return invoker(ctx, method, req, reply, cc, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag.IsLocal() { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return invoker(ctx, method, req, reply, cc, opts...) + } +} + +func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return streamer(ctx, desc, cc, method, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag.IsLocal() { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return streamer(ctx, desc, cc, method, opts...) 
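For orientation, a hedged sketch of how the qos pieces above fit together: an operator-supplied LimiterConfig feeds qos.NewLimiter (internal/qos/limiter.go, below), and requests then pass through ReadRequest/WriteRequest. The numbers are illustrative only; note that the validation rules (internal/qos/validate.go) require weights to be set either for all tags or for none, so this sketch sets only a per-tag ops limit:

	limitOps := 1000.0
	cfg := qos.LimiterConfig{
		Read: qos.OpConfig{
			MaxRunningOps: 128,
			MaxWaitingOps: 1024,
			IdleTimeout:   qos.DefaultIdleTimeout,
			Tags: []qos.IOTagConfig{
				{Tag: qos.IOTagBackground.String(), LimitOps: &limitOps},
			},
		},
		Write: qos.OpConfig{
			MaxRunningOps: 128,
			MaxWaitingOps: 1024,
			IdleTimeout:   qos.DefaultIdleTimeout,
		},
	}
	lim, err := qos.NewLimiter(cfg)
	if err != nil {
		return err
	}
	defer lim.Close()
	release, err := lim.ReadRequest(ctx) // queued by IO tag; may fail with ResourceExhausted
	if err != nil {
		return err
	}
	defer release()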
+ } +} + +func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() { + return handler(ctx, req) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return nil, new(apistatus.ResourceExhausted) + } + defer release() + + return handler(ctx, req) + } +} + +//nolint:contextcheck (grpc.ServerStream manages the context itself) +func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() { + return handler(srv, ss) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return new(apistatus.ResourceExhausted) + } + defer release() + + return handler(srv, ss) + } +} diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go new file mode 100644 index 000000000..7d0826754 --- /dev/null +++ b/internal/qos/grpc_test.go @@ -0,0 +1,219 @@ +package qos_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +const ( + okKey = "ok" +) + +var ( + errTest = errors.New("mock") + errWrongTag = errors.New("wrong tag") + errNoTag = errors.New("failed to get tag from context") + errResExhausted *apistatus.ResourceExhausted + tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} +) + +type mockGRPCServerStream struct { + grpc.ServerStream + + ctx context.Context +} + +func (m *mockGRPCServerStream) Context() context.Context { + return m.ctx +} + +type limiter struct { + acquired bool + released bool +} + +func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { + l.acquired = true + if key != okKey { + return nil, false + } + return func() { l.released = true }, true +} + +func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(ctx context.Context, req any) (any, error) { + return nil, errTest + } + _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) + return err +} + +func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(srv any, stream grpc.ServerStream) error { + return errTest + } + err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ + FullMethod: methodName, + }, handler) + return err +} + +func Test_MaxActiveRPCLimiter(t *testing.T) { + // UnaryServerInterceptor + t.Run("unary fail", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, 
lim.released) + }) + t.Run("unary pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := unaryMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) + // StreamServerInterceptor + t.Run("stream fail", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := streamMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) +} + +func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { + interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() + called := false + handler := func(ctx context.Context, req any) (any, error) { + called = true + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { + return nil, nil + } + return nil, errWrongTag + } + _, err := interceptor(context.Background(), nil, nil, handler) + require.NoError(t, err) + require.True(t, called) +} + +func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() + + // check context with no value + called := false + invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil + } + require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) + require.True(t, called) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return errNoTag + } + if raw != targetTag { + return errWrongTag + } + return nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) +} + +func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() + + // check context with no value + called := false + streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + called = 
true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil, nil + } + _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) + require.True(t, called) + require.NoError(t, err) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return nil, errNoTag + } + if raw != targetTag { + return nil, errWrongTag + } + return nil, nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + _, err := interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + _, err = interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) +} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go new file mode 100644 index 000000000..2d7de32fc --- /dev/null +++ b/internal/qos/limiter.go @@ -0,0 +1,246 @@ +package qos + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" +) + +const ( + defaultIdleTimeout time.Duration = 0 + defaultShare float64 = 1.0 + minusOne = ^uint64(0) + + defaultMetricsCollectTimeout = 5 * time.Second +) + +type ReleaseFunc scheduling.ReleaseFunc + +type Limiter interface { + ReadRequest(context.Context) (ReleaseFunc, error) + WriteRequest(context.Context) (ReleaseFunc, error) + SetParentID(string) + SetMetrics(Metrics) + Close() +} + +type scheduler interface { + RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error) + Close() +} + +func NewLimiter(c LimiterConfig) (Limiter, error) { + if err := c.Validate(); err != nil { + return nil, err + } + readScheduler, err := createScheduler(c.Read) + if err != nil { + return nil, fmt.Errorf("create read scheduler: %w", err) + } + writeScheduler, err := createScheduler(c.Write) + if err != nil { + return nil, fmt.Errorf("create write scheduler: %w", err) + } + l := &mClockLimiter{ + readScheduler: readScheduler, + writeScheduler: writeScheduler, + closeCh: make(chan struct{}), + wg: &sync.WaitGroup{}, + readStats: createStats(), + writeStats: createStats(), + } + l.shardID.Store(&shardID{}) + l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}}) + l.startMetricsCollect() + return l, nil +} + +func createScheduler(config OpConfig) (scheduler, error) { + if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { + return newSemaphoreScheduler(config.MaxRunningOps), nil + } + return scheduling.NewMClock( + uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps), + convertToSchedulingTags(config.Tags), config.IdleTimeout) +} + +func convertToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo { + result := make(map[string]scheduling.TagInfo) + for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { + result[tag.String()] = scheduling.TagInfo{ + Share: defaultShare, + } + } + for _, l := range limits { + v := result[l.Tag] + if l.Weight != nil && *l.Weight != 0 { + v.Share = *l.Weight + } + if
l.LimitOps != nil && *l.LimitOps != 0 { + v.LimitIOPS = l.LimitOps + } + if l.ReservedOps != nil && *l.ReservedOps != 0 { + v.ReservedIOPS = l.ReservedOps + } + v.Prohibited = l.Prohibited + result[l.Tag] = v + } + return result +} + +var ( + _ Limiter = (*noopLimiter)(nil) + releaseStub ReleaseFunc = func() {} + noopLimiterInstance = &noopLimiter{} +) + +func NewNoopLimiter() Limiter { + return noopLimiterInstance +} + +type noopLimiter struct{} + +func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) { + return releaseStub, nil +} + +func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { + return releaseStub, nil +} + +func (n *noopLimiter) SetParentID(string) {} + +func (n *noopLimiter) Close() {} + +func (n *noopLimiter) SetMetrics(Metrics) {} + +var _ Limiter = (*mClockLimiter)(nil) + +type shardID struct { + id string +} + +type mClockLimiter struct { + readScheduler scheduler + writeScheduler scheduler + + readStats map[string]*stat + writeStats map[string]*stat + + shardID atomic.Pointer[shardID] + metrics atomic.Pointer[metricsHolder] + closeCh chan struct{} + wg *sync.WaitGroup +} + +func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { + return requestArrival(ctx, n.readScheduler, n.readStats) +} + +func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { + return requestArrival(ctx, n.writeScheduler, n.writeStats) +} + +func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = IOTagClient.String() + } + stat := getStat(tag, stats) + stat.pending.Add(1) + if tag == IOTagCritical.String() { + stat.inProgress.Add(1) + return func() { + stat.completed.Add(1) + }, nil + } + rel, err := s.RequestArrival(ctx, tag) + stat.inProgress.Add(1) + if err != nil { + if isResourceExhaustedErr(err) { + stat.resourceExhausted.Add(1) + return nil, &apistatus.ResourceExhausted{} + } + stat.completed.Add(1) + return nil, err + } + return func() { + rel() + stat.completed.Add(1) + }, nil +} + +func (n *mClockLimiter) Close() { + n.readScheduler.Close() + n.writeScheduler.Close() + close(n.closeCh) + n.wg.Wait() + n.metrics.Load().metrics.Close(n.shardID.Load().id) +} + +func (n *mClockLimiter) SetParentID(parentID string) { + n.shardID.Store(&shardID{id: parentID}) +} + +func (n *mClockLimiter) SetMetrics(m Metrics) { + n.metrics.Store(&metricsHolder{metrics: m}) +} + +func (n *mClockLimiter) startMetricsCollect() { + n.wg.Add(1) + go func() { + defer n.wg.Done() + + ticker := time.NewTicker(defaultMetricsCollectTimeout) + defer ticker.Stop() + for { + select { + case <-n.closeCh: + return + case <-ticker.C: + shardID := n.shardID.Load().id + if shardID == "" { + continue + } + metrics := n.metrics.Load().metrics + exportMetrics(metrics, n.readStats, shardID, "read") + exportMetrics(metrics, n.writeStats, shardID, "write") + } + } + }() +} + +func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) { + var pending uint64 + var inProgress uint64 + var completed uint64 + var resExh uint64 + for tag, s := range stats { + pending = s.pending.Load() + inProgress = s.inProgress.Load() + completed = s.completed.Load() + resExh = s.resourceExhausted.Load() + if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 { + continue + } + metrics.SetOperationTagCounters(shardID, operation, tag, pending, 
inProgress, completed, resExh) + } +} + +func isResourceExhaustedErr(err error) bool { + return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || + errors.Is(err, errSemaphoreLimitExceeded) || + errors.Is(err, scheduling.ErrTagRequestsProhibited) +} diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go new file mode 100644 index 000000000..c00da51b7 --- /dev/null +++ b/internal/qos/metrics.go @@ -0,0 +1,31 @@ +package qos + +import "sync/atomic" + +type Metrics interface { + SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) + Close(shardID string) +} + +var _ Metrics = (*noopMetrics)(nil) + +type noopMetrics struct{} + +func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) { +} + +func (n *noopMetrics) Close(string) {} + +// stat represents limiter statistics as cumulative counters. +// +// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`. +type stat struct { + completed atomic.Uint64 + pending atomic.Uint64 + resourceExhausted atomic.Uint64 + inProgress atomic.Uint64 +} + +type metricsHolder struct { + metrics Metrics +} diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go new file mode 100644 index 000000000..74e6928f3 --- /dev/null +++ b/internal/qos/semaphore.go @@ -0,0 +1,39 @@ +package qos + +import ( + "context" + "errors" + + qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" +) + +var ( + _ scheduler = (*semaphore)(nil) + errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded") +) + +type semaphore struct { + s *qosSemaphore.Semaphore +} + +func newSemaphoreScheduler(size int64) *semaphore { + return &semaphore{ + s: qosSemaphore.NewSemaphore(size), + } +} + +func (s *semaphore) Close() {} + +func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if s.s.Acquire() { + return s.s.Release, nil + } + return nil, errSemaphoreLimitExceeded +} diff --git a/internal/qos/stats.go b/internal/qos/stats.go new file mode 100644 index 000000000..3ecfad9f9 --- /dev/null +++ b/internal/qos/stats.go @@ -0,0 +1,29 @@ +package qos + +const unknownStatsTag = "unknown" + +var statTags = map[string]struct{}{ + IOTagBackground.String(): {}, + IOTagClient.String(): {}, + IOTagCritical.String(): {}, + IOTagInternal.String(): {}, + IOTagPolicer.String(): {}, + IOTagTreeSync.String(): {}, + IOTagWritecache.String(): {}, + unknownStatsTag: {}, +} + +func createStats() map[string]*stat { + result := make(map[string]*stat) + for tag := range statTags { + result[tag] = &stat{} + } + return result +} + +func getStat(tag string, stats map[string]*stat) *stat { + if v, ok := stats[tag]; ok { + return v + } + return stats[unknownStatsTag] +} diff --git a/internal/qos/tags.go b/internal/qos/tags.go new file mode 100644 index 000000000..e3f7cafd6 --- /dev/null +++ b/internal/qos/tags.go @@ -0,0 +1,59 @@ +package qos + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" +) + +type IOTag string + +const ( + IOTagBackground IOTag = "background" + IOTagClient IOTag = "client" + IOTagCritical IOTag = "critical" + IOTagInternal IOTag = "internal" + IOTagPolicer IOTag = "policer" + IOTagTreeSync IOTag = "treesync" + IOTagWritecache IOTag = "writecache" + + ioTagUnknown IOTag
= "" +) + +func FromRawString(s string) (IOTag, error) { + switch s { + case string(IOTagBackground): + return IOTagBackground, nil + case string(IOTagClient): + return IOTagClient, nil + case string(IOTagCritical): + return IOTagCritical, nil + case string(IOTagInternal): + return IOTagInternal, nil + case string(IOTagPolicer): + return IOTagPolicer, nil + case string(IOTagTreeSync): + return IOTagTreeSync, nil + case string(IOTagWritecache): + return IOTagWritecache, nil + default: + return ioTagUnknown, fmt.Errorf("unknown tag %s", s) + } +} + +func (t IOTag) String() string { + return string(t) +} + +func IOTagFromContext(ctx context.Context) string { + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = "undefined" + } + return tag +} + +func (t IOTag) IsLocal() bool { + return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync +} diff --git a/internal/qos/validate.go b/internal/qos/validate.go new file mode 100644 index 000000000..70f1f24e8 --- /dev/null +++ b/internal/qos/validate.go @@ -0,0 +1,91 @@ +package qos + +import ( + "errors" + "fmt" + "math" +) + +var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") + +type tagConfig struct { + Shares, Limit, Reserved *float64 +} + +func (c *LimiterConfig) Validate() error { + if err := validateOpConfig(c.Read); err != nil { + return fmt.Errorf("limits 'read' section validation error: %w", err) + } + if err := validateOpConfig(c.Write); err != nil { + return fmt.Errorf("limits 'write' section validation error: %w", err) + } + return nil +} + +func validateOpConfig(c OpConfig) error { + if c.MaxRunningOps <= 0 { + return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) + } + if c.MaxWaitingOps <= 0 { + return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps) + } + if c.IdleTimeout <= 0 { + return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String()) + } + if err := validateTags(c.Tags); err != nil { + return fmt.Errorf("'tags' config section validation error: %w", err) + } + return nil +} + +func validateTags(configTags []IOTagConfig) error { + tags := map[IOTag]tagConfig{ + IOTagBackground: {}, + IOTagClient: {}, + IOTagInternal: {}, + IOTagPolicer: {}, + IOTagTreeSync: {}, + IOTagWritecache: {}, + } + for _, t := range configTags { + tag, err := FromRawString(t.Tag) + if err != nil { + return fmt.Errorf("invalid tag %s: %w", t.Tag, err) + } + if _, ok := tags[tag]; !ok { + return fmt.Errorf("tag %s is not configurable", t.Tag) + } + tags[tag] = tagConfig{ + Shares: t.Weight, + Limit: t.LimitOps, + Reserved: t.ReservedOps, + } + } + idx := 0 + var shares float64 + for t, v := range tags { + if idx == 0 { + idx++ + shares = float64Value(v.Shares) + } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) { + return errWeightsMustBeSpecified + } + if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) { + return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String()) + } + if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) { + return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String()) + } + if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) { + return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String()) + } + } + return nil +} + +func 
float64Value(f *float64) float64 { + if f == nil { + return 0.0 + } + return *f +} diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go index d4bc0cf68..91ee5c6c3 100644 --- a/pkg/core/client/util.go +++ b/pkg/core/client/util.go @@ -3,6 +3,7 @@ package client import ( "bytes" "fmt" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -19,7 +20,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro // Args must not be nil. func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { PublicKey() []byte - IterateAddresses(func(string) bool) + Addresses() iter.Seq[string] NumberOfAddresses() int ExternalAddresses() []string }, diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index a24b36944..61c568052 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -26,10 +26,10 @@ func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { // IsIndexedContainer returns True if container attributes should be indexed. func IsIndexedContainer(cnr containerSDK.Container) bool { var isS3Container bool - cnr.IterateAttributes(func(key, _ string) { + for key := range cnr.Attributes() { if key == ".s3-location-constraint" { isS3Container = true } - }) + } return !isS3Container } diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index b0c9e1f9e..e58e42634 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -1,6 +1,10 @@ package netmap -import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +import ( + "iter" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) // Node is a named type of netmap.NodeInfo which provides interface needed // in the current repository. Node is expected to be used everywhere instead @@ -14,10 +18,20 @@ func (x Node) PublicKey() []byte { return (netmap.NodeInfo)(x).PublicKey() } +// Addresses returns an iterator over all announced network addresses. +func (x Node) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() +} + // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. +// Deprecated: use [Node.Addresses] instead. func (x Node) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) + for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { + if f(s) { + return + } + } } // NumberOfAddresses returns number of announced network addresses. 
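The iterator migration above (pkg/core/client, pkg/core/container, pkg/core/netmap) replaces callback-style traversal with Go 1.23 range-over-func. A consumption sketch for the new Node.Addresses (tryConnect is hypothetical):

	for addr := range node.Addresses() {
		if tryConnect(addr) {
			break // early exit replaces returning true from the old callback
		}
	}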
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 239a9f389..dc336eb34 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -410,11 +411,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -483,12 +484,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -559,12 +560,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -596,26 +597,3 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } - -type testNetmapSource struct { - netmaps map[uint64]*netmap.NetMap - currentEpoch uint64 -} - -func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - if diff >= s.currentEpoch { - return nil, fmt.Errorf("invalid diff") - } - return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) -} - -func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.netmaps[epoch]; found { - return nm, nil - } - return nil, fmt.Errorf("netmap not found") -} - -func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) { - return s.currentEpoch, nil -} diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go index 67c9a3188..aab12ebf9 100644 --- a/pkg/core/object/info.go +++ b/pkg/core/object/info.go @@ -13,6 +13,13 @@ type ECInfo struct { Total uint32 } +func (v *ECInfo) String() string { + if v == nil { + return "" + } + return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) +} + // Info groups object address with its FrostFS // object info. 
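The test-local netmap source removed here now lives in pkg/util/testing as the shared utilTesting.TestNetmapSource; construction mirrors the old struct literal (curEpoch and currentEpochNM stand in for the test fixtures):

	src := &utilTesting.TestNetmapSource{
		Netmaps: map[uint64]*netmap.NetMap{
			curEpoch: currentEpochNM,
		},
		CurrentEpoch: curEpoch,
	}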
type Info struct { @@ -23,5 +30,5 @@ type Info struct { } func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject) + return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index f7b71dbe6..3d236641e 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -50,7 +50,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, var err error s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), @@ -159,7 +159,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli } else { // create governance processor governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, FrostFSClient: frostfsCli, AlphabetState: s, @@ -225,7 +225,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, @@ -247,7 +247,7 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, AlphabetState: s, @@ -268,7 +268,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, FrostFSClient: frostfsCli, @@ -291,7 +291,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, @@ -342,7 +342,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, controlsrv.WithAllowedKeys(authKeys), - ), log, audit) + ), log.WithTag(logger.TagGrpcSvc), audit) grpcControlSrv := grpc.NewServer() control.RegisterControlServiceServer(grpcControlSrv, controlSvc) @@ -458,7 +458,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- } morphChain := &chainParams{ - log: s.log, + log: s.log.WithTag(logger.TagMorph), cfg: cfg, key: s.key, name: morphPrefix, diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index ae5661905..3a5137261 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan ) (*Server, error) { var err error 
server := &Server{ - log: log, + log: log.WithTag(logger.TagIr), irMetrics: metrics, cmode: cmode, } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 854e2c779..8e4ab2623 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -209,7 +209,7 @@ func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr container return fmt.Errorf("could not get setting in contract: %w", err) } - if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting { + if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting { return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting) } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index 08ef8b86c..a6c40f9fa 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // WithLogger returns an option to specify Blobovnicza's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Blobovnicza")) + c.log = l } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index d9e99d0d1..3e8b9f07b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -158,11 +158,11 @@ func (b *Blobovniczas) Path() string { } // SetCompressor implements common.Storage. 
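The inner-ring changes above consistently derive component-scoped loggers with WithTag instead of passing one untagged logger around; the pattern, sketched (log is any *logger.Logger):

	procLog := log.WithTag(logger.TagProcessor) // event processors
	morphLog := log.WithTag(logger.TagMorph)    // morph chain client
	grpcLog := log.WithTag(logger.TagGrpcSvc)   // control gRPC service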
-func (b *Blobovniczas) SetCompressor(cc *compression.Config) { +func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { b.compression = cc } -func (b *Blobovniczas) Compressor() *compression.Config { +func (b *Blobovniczas) Compressor() *compression.Compressor { return b.compression } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index ec9743b57..f87f4a144 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -19,7 +19,8 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { st := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(10), WithBlobovniczaShallowDepth(1), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index 5414140f0..df2b4ffe5 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go @@ -19,7 +19,8 @@ func TestExistsInvalidStorageID(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go index d390ecf1d..9244d765c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go @@ -15,7 +15,8 @@ func TestGeneric(t *testing.T) { helper := func(t *testing.T, dir string) common.Storage { return NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -43,7 +44,8 @@ func TestControl(t *testing.T) { newTree := func(t *testing.T) common.Storage { return NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index f2f9509ad..6438f715b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string { return b.path } -// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. -type levelDbManager struct { +// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. 
+type levelDBManager struct { dbMtx *sync.RWMutex databases map[uint64]*sharedDB @@ -157,8 +157,8 @@ type levelDbManager struct { func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string, readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *levelDbManager { - result := &levelDbManager{ +) *levelDBManager { + result := &levelDBManager{ databases: make(map[uint64]*sharedDB), dbMtx: &sync.RWMutex{}, @@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st return result } -func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { +func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { res := m.getDBIfExists(idx) if res != nil { return res @@ -181,14 +181,14 @@ func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { return m.getOrCreateDB(idx) } -func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB { +func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB { m.dbMtx.RLock() defer m.dbMtx.RUnlock() return m.databases[idx] } -func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { +func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { m.dbMtx.Lock() defer m.dbMtx.Unlock() @@ -202,7 +202,7 @@ func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { return db } -func (m *levelDbManager) hasAnyDB() bool { +func (m *levelDBManager) hasAnyDB() bool { m.dbMtx.RLock() defer m.dbMtx.RUnlock() @@ -213,7 +213,7 @@ func (m *levelDbManager) hasAnyDB() bool { // // The blobovnicza opens at the first request, closes after the last request. type dbManager struct { - levelToManager map[string]*levelDbManager + levelToManager map[string]*levelDBManager levelToManagerGuard *sync.RWMutex closedFlag *atomic.Bool dbCounter *openDBCounter @@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, options: options, readOnly: readOnly, metrics: metrics, - levelToManager: make(map[string]*levelDbManager), + levelToManager: make(map[string]*levelDBManager), levelToManagerGuard: &sync.RWMutex{}, log: log, closedFlag: &atomic.Bool{}, @@ -266,7 +266,7 @@ func (m *dbManager) Close() { m.dbCounter.WaitUntilAllClosed() } -func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { +func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { result := m.getLevelManagerIfExists(lvlPath) if result != nil { return result @@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { return m.getOrCreateLevelManager(lvlPath) } -func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager { +func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager { m.levelToManagerGuard.RLock() defer m.levelToManagerGuard.RUnlock() return m.levelToManager[lvlPath] } -func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager { +func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager { m.levelToManagerGuard.Lock() defer m.levelToManagerGuard.Unlock() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 0e1b2022e..5f268b0f2 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -19,7 +19,7 @@ type cfg struct { openedCacheSize int blzShallowDepth uint64 blzShallowWidth uint64 - compression *compression.Config + compression 
*compression.Compressor blzOpts []blobovnicza.Option reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. metrics Metrics @@ -63,10 +63,15 @@ func initConfig(c *cfg) { } } -func WithLogger(l *logger.Logger) Option { +func WithBlobovniczaTreeLogger(log *logger.Logger) Option { return func(c *cfg) { - c.log = l - c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l)) + c.log = log + } +} + +func WithBlobovniczaLogger(log *logger.Logger) Option { + return func(c *cfg) { + c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index 16ef2b180..a840275b8 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -50,7 +50,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm var res common.RebuildRes b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild) - completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage) + completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter) res.ObjectsMoved += completedPreviosMoves if err != nil { b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) @@ -79,7 +79,7 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common. var completedDBCount uint32 for _, db := range dbs { b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) - movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter) + movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter) res.ObjectsMoved += movedObjects if err != nil { b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) @@ -195,7 +195,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil } -func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { +func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) { shDB := b.getBlobovnicza(ctx, path) blz, err := shDB.Open(ctx) if err != nil { @@ -212,7 +212,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M if err != nil { return 0, err } - migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter) + migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter) if err != nil { return migratedObjects, err } @@ -226,7 +226,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { sysPath := filepath.Join(b.rootPath, path) - sysPath = sysPath + rebuildSuffix + sysPath += rebuildSuffix _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) if err != nil { return nil, err @@ -238,7 +238,7 @@ func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (fun }, nil } -func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta 
common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
 	var result atomic.Uint64
 	batch := make(map[oid.Address][]byte)
@@ -253,7 +253,12 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 	})
 
 	for {
-		_, err := blz.Iterate(ctx, prm)
+		release, err := limiter.ReadRequest(ctx)
+		if err != nil {
+			return result.Load(), err
+		}
+		_, err = blz.Iterate(ctx, prm)
+		release()
 		if err != nil && !errors.Is(err, errBatchFull) {
 			return result.Load(), err
 		}
@@ -265,13 +270,19 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 		eg, egCtx := errgroup.WithContext(ctx)
 		for addr, data := range batch {
-			if err := limiter.AcquireWorkSlot(egCtx); err != nil {
+			release, err := limiter.AcquireWorkSlot(egCtx)
+			if err != nil {
 				_ = eg.Wait()
 				return result.Load(), err
 			}
 			eg.Go(func() error {
-				defer limiter.ReleaseWorkSlot()
-				err := b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+				defer release()
+				moveRelease, err := limiter.WriteRequest(ctx)
+				if err != nil {
+					return err
+				}
+				err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+				moveRelease()
 				if err == nil {
 					result.Add(1)
 				}
@@ -317,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo
 	return nil
 }
 
-func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) {
+func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
 	select {
 	case <-ctx.Done():
 		return false, ctx.Err()
@@ -330,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
 	b.dbFilesGuard.Lock()
 	defer b.dbFilesGuard.Unlock()
 
-	if err := shDb.CloseAndRemoveFile(ctx); err != nil {
+	if err := shDB.CloseAndRemoveFile(ctx); err != nil {
 		return false, err
 	}
 	b.commondbManager.CleanResources(path)
@@ -359,7 +370,7 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
 	return b.dropDirectoryIfEmpty(filepath.Dir(path))
 }
 
-func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
+func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
 	var count uint64
 	var rebuildTempFilesToRemove []string
 	err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
@@ -372,13 +383,24 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 		}
 		defer shDB.Close(ctx)
 
+		release, err := rateLimiter.ReadRequest(ctx)
+		if err != nil {
+			return false, err
+		}
 		incompletedMoves, err := blz.ListMoveInfo(ctx)
+		release()
 		if err != nil {
 			return true, err
 		}
 
 		for _, move := range incompletedMoves {
-			if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil {
+			release, err := rateLimiter.WriteRequest(ctx)
+			if err != nil {
+				return false, err
+			}
+			err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
+			release()
+			if err != nil {
 				return true, err
 			}
 			count++
@@ -388,9 +410,14 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 		return false, nil
 	})
 
 	for _, tmp := range rebuildTempFilesToRemove {
+		release, err := rateLimiter.WriteRequest(ctx)
+		if err != nil {
+			return count, err
+		}
 		if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
 			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
+		release()
 	}
 	return count, err
 }
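The hunks above replace the fire-and-forget AcquireWorkSlot/ReleaseWorkSlot pair with calls that return a release callback, so every read, write, and work-slot acquisition is paired with exactly one release. A minimal sketch of a limiter with this shape, built on golang.org/x/sync/semaphore; semLimiter is illustrative only and is not part of this change:

package qossketch

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// ReleaseFunc mirrors common.ReleaseFunc from this diff.
type ReleaseFunc func()

// semLimiter bounds concurrent read and write requests independently.
type semLimiter struct {
	read  *semaphore.Weighted
	write *semaphore.Weighted
}

func newSemLimiter(readSlots, writeSlots int64) *semLimiter {
	return &semLimiter{
		read:  semaphore.NewWeighted(readSlots),
		write: semaphore.NewWeighted(writeSlots),
	}
}

func (l *semLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	if err := l.read.Acquire(ctx, 1); err != nil {
		return nil, err // context cancelled: the caller must not call release
	}
	return func() { l.read.Release(1) }, nil
}

func (l *semLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) {
	if err := l.write.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	return func() { l.write.Release(1) }, nil
}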
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index 2f58624aa..4146ef260 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -140,7 +140,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
 func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(2),
 		WithBlobovniczaShallowDepth(2),
@@ -161,16 +162,18 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
 		storageIDs: make(map[oid.Address][]byte),
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-		MetaStorage:   metaStub,
-		WorkerLimiter: &rebuildLimiterStub{},
-		FillPercent:   1,
+		MetaStorage: metaStub,
+		Limiter:     limiter,
+		FillPercent: 1,
 	})
 	require.NoError(t, err)
 	require.Equal(t, uint64(1), rRes.ObjectsMoved)
 	require.Equal(t, uint64(0), rRes.FilesRemoved)
 
 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())
 
 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
 	require.NoError(t, blz.Open(context.Background()))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index aae72b5ff..a7a99fec3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -2,7 +2,9 @@ package blobovniczatree
 
 import (
 	"context"
+	"fmt"
 	"sync"
+	"sync/atomic"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -48,7 +50,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -76,10 +79,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard:      &sync.Mutex{},
 		}
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   60,
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 60,
 		})
 		require.NoError(t, err)
 		dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -94,6 +98,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 
 	t.Run("no rebuild single db", func(t *testing.T) {
@@ -102,7 +107,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -128,10 +134,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard:      &sync.Mutex{},
 		}
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   90, // 64KB / 100KB = 64%
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 90, // 64KB / 100KB = 64%
 		})
 		require.NoError(t, err)
 		dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -146,6 +153,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 
 	t.Run("rebuild by fill percent", func(t *testing.T) {
@@ -154,7 +162,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -193,10 +202,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard:      &sync.Mutex{},
 		}
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   80,
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 80,
 		})
 		require.NoError(t, err)
 		require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -215,6 +225,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 
 	t.Run("rebuild by overflow", func(t *testing.T) {
@@ -223,7 +234,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -254,7 +266,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		require.NoError(t, b.Close(context.Background()))
 		b = NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1),
 			WithBlobovniczaShallowDepth(1),
@@ -266,10 +279,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		require.NoError(t, b.Open(mode.ComponentReadWrite))
 		require.NoError(t, b.Init())
 
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   80,
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 80,
 		})
 		require.NoError(t, err)
 		require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -285,6 +299,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 }
 
@@ -294,7 +309,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(64*1024), // 64KB object size limit
 		WithBlobovniczaShallowWidth(5),
 		WithBlobovniczaShallowDepth(2), // depth = 2
@@ -322,7 +338,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 
 	b = NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(32*1024), // 32KB object size limit
 		WithBlobovniczaShallowWidth(5),
 		WithBlobovniczaShallowDepth(3), // depth = 3
@@ -338,9 +355,10 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 		storageIDs: storageIDs,
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	var rPrm common.RebuildPrm
 	rPrm.MetaStorage = metaStub
-	rPrm.WorkerLimiter = &rebuildLimiterStub{}
+	rPrm.Limiter = limiter
 	rPrm.FillPercent = 1
 	rRes, err := b.Rebuild(context.Background(), rPrm)
 	require.NoError(t, err)
@@ -356,13 +374,15 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 	}
 
 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())
 }
 
 func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(sourceWidth),
 		WithBlobovniczaShallowDepth(sourceDepth),
@@ -403,7 +423,8 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 
 	b = NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(targetWidth),
 		WithBlobovniczaShallowDepth(targetDepth),
@@ -427,9 +448,10 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 		storageIDs: storageIDs,
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	var rPrm common.RebuildPrm
 	rPrm.MetaStorage = metaStub
-	rPrm.WorkerLimiter = &rebuildLimiterStub{}
+	rPrm.Limiter = limiter
 	rPrm.FillPercent = 1
 	rRes, err := b.Rebuild(context.Background(), rPrm)
 	require.NoError(t, err)
@@ -445,6 +467,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 	}
 
 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())
 }
 
 type storageIDUpdateStub struct {
@@ -462,7 +485,36 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr
 	return nil
 }
 
-type rebuildLimiterStub struct{}
+type rebuildLimiterStub struct {
+	slots         atomic.Int64
+	readRequests  atomic.Int64
+	writeRequests atomic.Int64
+}
 
-func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil }
-func (s *rebuildLimiterStub) ReleaseWorkSlot()                      {}
+func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) {
+	s.slots.Add(1)
+	return func() { s.slots.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) {
+	s.readRequests.Add(1)
+	return func() { s.readRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) {
+	s.writeRequests.Add(1)
+	return func() { s.writeRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ValidateReleased() error {
+	if v := s.slots.Load(); v != 0 {
+		return fmt.Errorf("invalid slots value %d", v)
+	}
+	if v := s.readRequests.Load(); v != 0 {
+		return fmt.Errorf("invalid read requests value %d", v)
+	}
+	if v := s.writeRequests.Load(); v != 0 {
+		return fmt.Errorf("invalid write requests value %d", v)
+	}
+	return nil
+}
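The reworked stub counts outstanding acquisitions instead of doing nothing, which lets every test assert that the tree released all limiter resources before it was closed. A condensed usage sketch, assuming it sits in the same test file as the stub above:

func TestLimiterBalance(t *testing.T) {
	limiter := &rebuildLimiterStub{}

	release, err := limiter.ReadRequest(context.Background())
	require.NoError(t, err)
	release() // every successful acquisition is paired with one release

	// A non-zero counter here would mean a leaked slot or request.
	require.NoError(t, limiter.ValidateReleased())
}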
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index f850f48b4..ceaf2538a 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -41,7 +41,7 @@ type SubStorageInfo struct {
 type Option func(*cfg)
 
 type cfg struct {
-	compression compression.Config
+	compression compression.Compressor
 	log         *logger.Logger
 	storage     []SubStorage
 	metrics     Metrics
@@ -91,50 +91,13 @@ func WithStorages(st []SubStorage) Option {
 // WithLogger returns option to specify BlobStor's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = l.With(zap.String("component", "BlobStor"))
+		c.log = l
 	}
 }
 
-// WithCompressObjects returns option to toggle
-// compression of the stored objects.
-//
-// If true, Zstandard algorithm is used for data compression.
-//
-// If compressor (decompressor) creation failed,
-// the uncompressed option will be used, and the error
-// is recorded in the provided log.
-func WithCompressObjects(comp bool) Option {
+func WithCompression(comp compression.Config) Option {
 	return func(c *cfg) {
-		c.compression.Enabled = comp
-	}
-}
-
-// WithCompressibilityEstimate returns an option to use
-// normilized compressibility estimate to decide compress
-// data or not.
-//
-// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
-func WithCompressibilityEstimate(v bool) Option {
-	return func(c *cfg) {
-		c.compression.UseCompressEstimation = v
-	}
-}
-
-// WithCompressibilityEstimateThreshold returns an option to set
-// normilized compressibility estimate threshold.
-//
-// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
-func WithCompressibilityEstimateThreshold(threshold float64) Option {
-	return func(c *cfg) {
-		c.compression.CompressEstimationThreshold = threshold
-	}
-}
-
-// WithUncompressableContentTypes returns option to disable decompression
-// for specific content types as seen by object.AttributeContentType attribute.
-func WithUncompressableContentTypes(values []string) Option {
-	return func(c *cfg) {
-		c.compression.UncompressableContentTypes = values
+		c.compression.Config = comp
 	}
 }
 
@@ -152,6 +115,6 @@ func WithMetrics(m Metrics) Option {
 	}
 }
 
-func (b *BlobStor) Compressor() *compression.Config {
-	return &b.cfg.compression
+func (b *BlobStor) Compressor() *compression.Compressor {
+	return &b.compression
 }
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index 6cc56fa3b..6ddeb6f00 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -9,6 +9,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -51,7 +52,9 @@ func TestCompression(t *testing.T) {
 
 	newBlobStor := func(t *testing.T, compress bool) *BlobStor {
 		bs := New(
-			WithCompressObjects(compress),
+			WithCompression(compression.Config{
+				Enabled: compress,
+			}),
 			WithStorages(defaultStorages(dir, smallSizeLimit)))
 		require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
 		require.NoError(t, bs.Init(context.Background()))
@@ -113,8 +116,10 @@ func TestBlobstor_needsCompression(t *testing.T) {
 		dir := t.TempDir()
 
 		bs := New(
-			WithCompressObjects(compress),
-			WithUncompressableContentTypes(ct),
+			WithCompression(compression.Config{
+				Enabled:                    compress,
+				UncompressableContentTypes: ct,
+			}),
 			WithStorages([]SubStorage{
 				{
 					Storage: blobovniczatree.NewBlobovniczaTree(
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 19e181ee7..788fe66f2 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -12,16 +12,27 @@ type RebuildRes struct {
 }
 
 type RebuildPrm struct {
-	MetaStorage   MetaStorage
-	WorkerLimiter ConcurrentWorkersLimiter
-	FillPercent   int
+	MetaStorage MetaStorage
+	Limiter     RebuildLimiter
+	FillPercent int
 }
 
 type MetaStorage interface {
 	UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
 }
 
-type ConcurrentWorkersLimiter interface {
-	AcquireWorkSlot(ctx context.Context) error
-	ReleaseWorkSlot()
+type ReleaseFunc func()
+
+type ConcurrencyLimiter interface {
+	AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error)
+}
+
+type RateLimiter interface {
+	ReadRequest(context.Context) (ReleaseFunc, error)
+	WriteRequest(context.Context) (ReleaseFunc, error)
+}
+
+type RebuildLimiter interface {
+	ConcurrencyLimiter
+	RateLimiter
+}
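RebuildLimiter is now a composition of the two smaller interfaces, so a single value can throttle both scheduling (work slots) and disk traffic (read/write requests). The smallest possible implementation, a hypothetical no-op that never blocks, shows what implementers must provide:

package sketch

import "context"

type ReleaseFunc func()

// noopLimiter satisfies ConcurrencyLimiter and RateLimiter, and therefore
// RebuildLimiter, by handing out releases that do nothing. Illustrative only.
type noopLimiter struct{}

func (noopLimiter) AcquireWorkSlot(context.Context) (ReleaseFunc, error) {
	return func() {}, nil
}

func (noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) {
	return func() {}, nil
}

func (noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) {
	return func() {}, nil
}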
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index 6ecef48cd..e35c35e60 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -18,8 +18,8 @@ type Storage interface {
 	Path() string
 	ObjectsCount(ctx context.Context) (uint64, error)
 
-	SetCompressor(cc *compression.Config)
-	Compressor() *compression.Config
+	SetCompressor(cc *compression.Compressor)
+	Compressor() *compression.Compressor
 
 	// SetReportErrorFunc allows to provide a function to be called on disk errors.
 	// This function MUST be called before Open.
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 9f70f8ec2..445a0494b 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -11,7 +11,7 @@ import (
 )
 
 func BenchmarkCompression(b *testing.B) {
-	c := Config{Enabled: true}
+	c := Compressor{Config: Config{Enabled: true}}
 	require.NoError(b, c.Init())
 
 	for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} {
@@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) {
 	}
 }
 
-func benchWith(b *testing.B, c Config, data []byte) {
+func benchWith(b *testing.B, c Compressor, data []byte) {
 	b.ResetTimer()
 	b.ReportAllocs()
 	for range b.N {
@@ -56,8 +56,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
 	b.Run("estimate", func(b *testing.B) {
 		b.ResetTimer()
 
-		c := &Config{
-			Enabled: true,
+		c := &Compressor{
+			Config: Config{
+				Enabled: true,
+			},
 		}
 		require.NoError(b, c.Init())
 
@@ -76,8 +78,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
 	b.Run("compress", func(b *testing.B) {
 		b.ResetTimer()
 
-		c := &Config{
-			Enabled: true,
+		c := &Compressor{
+			Config: Config{
+				Enabled: true,
+			},
 		}
 		require.NoError(b, c.Init())
diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go
index 85ab47692..c76cec9a1 100644
--- a/pkg/local_object_storage/blobstor/compression/compress.go
+++ b/pkg/local_object_storage/blobstor/compression/compress.go
@@ -4,21 +4,36 @@ import (
 	"bytes"
 	"strings"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/klauspost/compress"
 	"github.com/klauspost/compress/zstd"
 )
 
+type Level string
+
+const (
+	LevelDefault      Level = ""
+	LevelOptimal      Level = "optimal"
+	LevelFastest      Level = "fastest"
+	LevelSmallestSize Level = "smallest_size"
+)
+
+type Compressor struct {
+	Config
+
+	encoder *zstd.Encoder
+	decoder *zstd.Decoder
+}
+
 // Config represents common compression-related configuration.
 type Config struct {
 	Enabled                    bool
 	UncompressableContentTypes []string
+	Level                      Level
 
-	UseCompressEstimation       bool
-	CompressEstimationThreshold float64
-
-	encoder *zstd.Encoder
-	decoder *zstd.Decoder
+	EstimateCompressibility          bool
+	EstimateCompressibilityThreshold float64
 }
 
 // zstdFrameMagic contains first 4 bytes of any compressed object
@@ -26,11 +41,11 @@ type Config struct {
 var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
 
 // Init initializes compression routines.
-func (c *Config) Init() error {
+func (c *Compressor) Init() error {
 	var err error
 
 	if c.Enabled {
-		c.encoder, err = zstd.NewWriter(nil)
+		c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel()))
 		if err != nil {
 			return err
 		}
@@ -73,7 +88,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool {
 
 // Decompress decompresses data if it starts with the magic
 // and returns data untouched otherwise.
-func (c *Config) Decompress(data []byte) ([]byte, error) {
+func (c *Compressor) Decompress(data []byte) ([]byte, error) {
 	if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) {
 		return data, nil
 	}
@@ -82,13 +97,13 @@ func (c *Config) Decompress(data []byte) ([]byte, error) {
 
 // Compress compresses data if compression is enabled
 // and returns data untouched otherwise.
-func (c *Config) Compress(data []byte) []byte {
+func (c *Compressor) Compress(data []byte) []byte {
 	if c == nil || !c.Enabled {
 		return data
 	}
-	if c.UseCompressEstimation {
+	if c.EstimateCompressibility {
 		estimated := compress.Estimate(data)
-		if estimated >= c.CompressEstimationThreshold {
+		if estimated >= c.EstimateCompressibilityThreshold {
 			return c.compress(data)
 		}
 		return data
@@ -96,7 +111,7 @@ func (c *Config) Compress(data []byte) []byte {
 	return c.compress(data)
 }
 
-func (c *Config) compress(data []byte) []byte {
+func (c *Compressor) compress(data []byte) []byte {
 	maxSize := c.encoder.MaxEncodedSize(len(data))
 	compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
 	if len(data) < len(compressed) {
@@ -106,7 +121,7 @@ func (c *Config) compress(data []byte) []byte {
 }
 
 // Close closes encoder and decoder, returns any error occurred.
-func (c *Config) Close() error {
+func (c *Compressor) Close() error {
 	var err error
 	if c.encoder != nil {
 		err = c.encoder.Close()
@@ -116,3 +131,24 @@ func (c *Config) Close() error {
 	}
 	return err
 }
+
+func (c *Config) HasValidCompressionLevel() bool {
+	return c.Level == LevelDefault ||
+		c.Level == LevelOptimal ||
+		c.Level == LevelFastest ||
+		c.Level == LevelSmallestSize
+}
+
+func (c *Compressor) compressionLevel() zstd.EncoderLevel {
+	switch c.Level {
+	case LevelDefault, LevelOptimal:
+		return zstd.SpeedDefault
+	case LevelFastest:
+		return zstd.SpeedFastest
+	case LevelSmallestSize:
+		return zstd.SpeedBestCompression
+	default:
+		assert.Fail("unknown compression level", string(c.Level))
+		return zstd.SpeedDefault
+	}
+}
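The compressor now carries a string-typed Level that is mapped onto klauspost/compress encoder speeds at Init time. A standalone demonstration of that mapping against the real zstd API; it mirrors compressionLevel() but is not the patched code itself:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// encoderLevel reproduces the Level-to-encoder mapping from the diff above.
func encoderLevel(level string) zstd.EncoderLevel {
	switch level {
	case "", "optimal":
		return zstd.SpeedDefault
	case "fastest":
		return zstd.SpeedFastest
	case "smallest_size":
		return zstd.SpeedBestCompression
	}
	return zstd.SpeedDefault
}

func main() {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(encoderLevel("smallest_size")))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	payload := make([]byte, 1024) // highly compressible zeroes
	fmt.Println("compressed size:", len(enc.EncodeAll(payload, nil)))
}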
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 93316be02..0418eedd0 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"go.uber.org/zap"
 )
@@ -53,6 +54,10 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
 func (b *BlobStor) Init(ctx context.Context) error {
 	b.log.Debug(ctx, logs.BlobstorInitializing)
 
+	if !b.compression.HasValidCompressionLevel() {
+		b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
+		b.compression.Level = compression.LevelDefault
+	}
 	if err := b.compression.Init(); err != nil {
 		return err
 	}
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index b5dbc9e40..3caee7ee1 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -2,6 +2,8 @@ package fstree
 
 import (
 	"sync"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 )
 
 // FileCounter used to count files in FSTree. The implementation must be thread-safe.
@@ -52,16 +54,11 @@ func (c *SimpleCounter) Dec(size uint64) {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
 
-	if c.count > 0 {
-		c.count--
-	} else {
-		panic("fstree.SimpleCounter: invalid count")
-	}
-	if c.size >= size {
-		c.size -= size
-	} else {
-		panic("fstree.SimpleCounter: invalid size")
-	}
+	assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
+	c.count--
+
+	assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
+	c.size -= size
 }
 
 func (c *SimpleCounter) CountSize() (uint64, uint64) {
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 031b385b2..112741ab4 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -45,7 +45,7 @@ type FSTree struct {
 	log *logger.Logger
 
-	*compression.Config
+	compressor *compression.Compressor
 
 	Depth      uint64
 	DirNameLen int
@@ -82,7 +82,7 @@ func New(opts ...Option) *FSTree {
 			Permissions: 0o700,
 			RootPath:    "./",
 		},
-		Config:     nil,
+		compressor: nil,
 		Depth:      4,
 		DirNameLen: DirNameLen,
 		metrics:    &noopMetrics{},
@@ -196,7 +196,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 		}
 
 		if err == nil {
-			data, err = t.Decompress(data)
+			data, err = t.compressor.Decompress(data)
 		}
 		if err != nil {
 			if prm.IgnoreErrors {
@@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err
 		return common.PutRes{}, err
 	}
 	if !prm.DontCompress {
-		prm.RawData = t.Compress(prm.RawData)
+		prm.RawData = t.compressor.Compress(prm.RawData)
 	}
 
 	size = len(prm.RawData)
@@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err
 		}
 	}
 
-	data, err = t.Decompress(data)
+	data, err = t.compressor.Decompress(data)
 	if err != nil {
 		return common.GetRes{}, err
 	}
@@ -597,12 +597,12 @@ func (t *FSTree) Path() string {
 }
 
 // SetCompressor implements common.Storage.
-func (t *FSTree) SetCompressor(cc *compression.Config) {
-	t.Config = cc
+func (t *FSTree) SetCompressor(cc *compression.Compressor) {
+	t.compressor = cc
 }
 
-func (t *FSTree) Compressor() *compression.Config {
-	return t.Config
+func (t *FSTree) Compressor() *compression.Compressor {
+	return t.compressor
 }
 
 // SetReportErrorFunc implements common.Storage.
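The counter hunks above replace hand-rolled panic branches with internal/assert helpers; the invariant is unchanged: Dec must never drive count or size below zero. A standalone re-creation of that invariant, with assertTrue standing in for the repository's internal/assert package:

package counterexample

import "sync"

// assertTrue is a stand-in for the repository's assert.True helper.
func assertTrue(cond bool, msg string) {
	if !cond {
		panic(msg)
	}
}

type SimpleCounter struct {
	mtx   sync.Mutex
	count uint64
	size  uint64
}

func (c *SimpleCounter) Dec(size uint64) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	// Both checks fire before the subtraction, so the counter can never
	// wrap around an unsigned zero.
	assertTrue(c.count > 0, "invalid count")
	c.count--

	assertTrue(c.size >= size, "invalid size")
	c.size -= size
}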
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 07a618b0a..6d633dad6 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -67,12 +67,9 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
 	err := w.writeFile(tmpPath, data)
 	if err != nil {
 		var pe *fs.PathError
-		if errors.As(err, &pe) {
-			switch pe.Err {
-			case syscall.ENOSPC:
-				err = common.ErrNoSpace
-				_ = os.RemoveAll(tmpPath)
-			}
+		if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
+			err = common.ErrNoSpace
+			_ = os.RemoveAll(tmpPath)
 		}
 		return err
 	}
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 7155ddcbb..6f2ac87e1 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -4,7 +4,6 @@ import (
 	"io/fs"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"go.uber.org/zap"
 )
 
 type Option func(*FSTree)
@@ -53,6 +52,6 @@ func WithFileCounter(c FileCounter) Option {
 
 func WithLogger(l *logger.Logger) Option {
 	return func(f *FSTree) {
-		f.log = l.With(zap.String("component", "FSTree"))
+		f.log = l
 	}
 }
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index 36b2c33f8..d54c54f59 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -3,6 +3,7 @@ package blobstortest
 import (
 	"context"
 	"errors"
+	"slices"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -26,7 +27,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
 	_, err := s.Delete(context.Background(), delPrm)
 	require.NoError(t, err)
 
-	objects = append(objects[:delID], objects[delID+1:]...)
+	objects = slices.Delete(objects, delID, delID+1)
 
 	runTestNormalHandler(t, s, objects)
 
@@ -49,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
 	_, err := s.Iterate(context.Background(), iterPrm)
 	require.NoError(t, err)
 
-	require.Equal(t, len(objects), len(seen))
+	require.Len(t, objects, len(seen))
 	for i := range objects {
 		d, ok := seen[objects[i].addr.String()]
 		require.True(t, ok)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index ccfa510fe..2786321a8 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -24,7 +25,9 @@ func TestIterateObjects(t *testing.T) {
 	// create BlobStor instance
 	blobStor := New(
 		WithStorages(defaultStorages(p, smalSz)),
-		WithCompressObjects(true),
+		WithCompression(compression.Config{
+			Enabled: true,
+		}),
 	)
 
 	defer os.RemoveAll(p)
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 95a916662..3df96a1c3 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -16,7 +16,7 @@ func (s *memstoreImpl) Init() error
 func (s *memstoreImpl) Close(context.Context) error { return nil }
 func (s *memstoreImpl) Type() string                { return Type }
 func (s *memstoreImpl) Path() string                { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
-func (s *memstoreImpl) Compressor() *compression.Config      { return s.compression }
+func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Compressor      { return s.compression }
 func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {}
 func (s *memstoreImpl) SetParentID(string)                                      {}
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 97a03993d..7605af4e5 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -7,7 +7,7 @@ import (
 type cfg struct {
 	rootPath    string
 	readOnly    bool
-	compression *compression.Config
+	compression *compression.Compressor
}
 
 func defaultConfig() *cfg {
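The writeAndRename change matters because pe.Err may itself wrap ENOSPC: a switch on the exact value misses wrapped errors, while errors.Is follows the wrap chain. A standalone illustration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"syscall"
)

func main() {
	// The inner error wraps ENOSPC one level deeper than a direct assignment.
	var err error = &fs.PathError{
		Op:   "write",
		Path: "/tmp/obj",
		Err:  fmt.Errorf("flush failed: %w", syscall.ENOSPC),
	}

	var pe *fs.PathError
	// `switch pe.Err { case syscall.ENOSPC: }` would NOT match here,
	// but errors.Is unwraps and does.
	if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
		fmt.Println("treat as common.ErrNoSpace")
	}
}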
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 2a6b94789..f28816555 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -13,19 +13,14 @@ type StorageIDUpdate interface {
 	UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
 }
 
-type ConcurrentWorkersLimiter interface {
-	AcquireWorkSlot(ctx context.Context) error
-	ReleaseWorkSlot()
-}
-
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error {
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error {
 	var summary common.RebuildRes
 	var rErr error
 	for _, storage := range b.storage {
 		res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
-			MetaStorage:   upd,
-			WorkerLimiter: limiter,
-			FillPercent:   fillPercent,
+			MetaStorage: upd,
+			Limiter:     concLimiter,
+			FillPercent: fillPercent,
 		})
 		summary.FilesRemoved += res.FilesRemoved
 		summary.ObjectsMoved += res.ObjectsMoved
diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go
index fb1188751..3a38ecf82 100644
--- a/pkg/local_object_storage/blobstor/teststore/option.go
+++ b/pkg/local_object_storage/blobstor/teststore/option.go
@@ -17,8 +17,8 @@ type cfg struct {
 	Type func() string
 	Path func() string
 
-	SetCompressor func(cc *compression.Config)
-	Compressor    func() *compression.Config
+	SetCompressor func(cc *compression.Compressor)
+	Compressor    func() *compression.Compressor
 
 	SetReportErrorFunc func(f func(context.Context, string, error))
 
 	Get func(common.GetPrm) (common.GetRes, error)
@@ -45,11 +45,11 @@ func WithClose(f func() error) Option { return func(c *cfg) { c
 func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } }
 func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } }
 
-func WithSetCompressor(f func(*compression.Config)) Option {
+func WithSetCompressor(f func(*compression.Compressor)) Option {
 	return func(c *cfg) { c.overrides.SetCompressor = f }
 }
 
-func WithCompressor(f func() *compression.Config) Option {
+func WithCompressor(f func() *compression.Compressor) Option {
 	return func(c *cfg) { c.overrides.Compressor = f }
 }
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
index 626ba0023..190b6a876 100644
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -116,7 +116,7 @@ func (s *TestStore) Path() string {
 	}
 }
 
-func (s *TestStore) SetCompressor(cc *compression.Config) {
+func (s *TestStore) SetCompressor(cc *compression.Compressor) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 	switch {
@@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Config) {
 	}
 }
 
-func (s *TestStore) Compressor() *compression.Config {
+func (s *TestStore) Compressor() *compression.Compressor {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 	switch {
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index b2d7a1037..e0617a832 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -48,8 +48,9 @@ func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm)
 	defer elapsed("ContainerSize", e.metrics.AddMethodDuration)()
 
 	err = e.execIfNotBlocked(func() error {
-		res = e.containerSize(ctx, prm)
-		return nil
+		var csErr error
+		res, csErr = e.containerSize(ctx, prm)
+		return csErr
 	})
 
 	return
@@ -69,12 +70,13 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er
 	return res.Size(), nil
 }
 
-func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes) {
-	e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
+func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) {
+	var res ContainerSizeRes
+	err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
 		var csPrm shard.ContainerSizePrm
 		csPrm.SetContainerID(prm.cnr)
 
-		csRes, err := sh.Shard.ContainerSize(csPrm)
+		csRes, err := sh.ContainerSize(ctx, csPrm)
 		if err != nil {
 			e.reportShardError(ctx, sh, "can't get container size", err,
 				zap.Stringer("container_id", prm.cnr))
@@ -86,7 +88,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
 		return false
 	})
 
-	return
+	return res, err
 }
 
 // ListContainers returns a unique container IDs presented in the engine objects.
@@ -96,8 +98,9 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm)
 	defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
 
 	err = e.execIfNotBlocked(func() error {
-		res = e.listContainers(ctx)
-		return nil
+		var lcErr error
+		res, lcErr = e.listContainers(ctx)
+		return lcErr
 	})
 
 	return
@@ -115,11 +118,11 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
 	return res.Containers(), nil
 }
 
-func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
+func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
 	uniqueIDs := make(map[string]cid.ID)
 
-	e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
-		res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
+	if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) {
+		res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
 		if err != nil {
 			e.reportShardError(ctx, sh, "can't get list of containers", err)
 			return false
@@ -133,7 +136,9 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
 		}
 
 		return false
-	})
+	}); err != nil {
+		return ListContainersRes{}, err
+	}
 
 	result := make([]cid.ID, 0, len(uniqueIDs))
 	for _, v := range uniqueIDs {
@@ -142,5 +147,5 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
 
 	return ListContainersRes{
 		containers: result,
-	}
+	}, nil
 }
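Both engine methods now surface iteration failures instead of silently returning partial results. The shape of the new contract, sketched with a stand-in shard type (iterateOverUnsortedShards itself is outside this diff, so this is an assumption about its behavior):

package sketch

import "context"

type shardRef struct{ id string } // stand-in for the engine's hashedShard

// iterateShards models the error-returning iterator: the handler still
// decides when to stop, while cancellation aborts the walk with an error
// that the caller must propagate.
func iterateShards(ctx context.Context, shards []shardRef, handler func(shardRef) (stop bool)) error {
	for _, sh := range shards {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if handler(sh) {
			return nil
		}
	}
	return nil
}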
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 6a416cfd9..bf1649f6e 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -22,10 +22,6 @@ type shardInitError struct {
 // Open opens all StorageEngine's components.
 func (e *StorageEngine) Open(ctx context.Context) error {
-	return e.open(ctx)
-}
-
-func (e *StorageEngine) open(ctx context.Context) error {
 	e.mtx.Lock()
 	defer e.mtx.Unlock()
 
@@ -77,7 +73,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	errCh := make(chan shardInitError, len(e.shards))
 	var eg errgroup.Group
-	if e.cfg.lowMem && e.anyShardRequiresRefill() {
+	if e.lowMem && e.anyShardRequiresRefill() {
 		eg.SetLimit(1)
 	}
 
@@ -149,20 +145,14 @@ var errClosed = errors.New("storage engine is closed")
 func (e *StorageEngine) Close(ctx context.Context) error {
 	close(e.closeCh)
 	defer e.wg.Wait()
-	return e.setBlockExecErr(ctx, errClosed)
+	return e.closeEngine(ctx)
 }
 
 // closes all shards. Never returns an error, shard errors are logged.
-func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
+func (e *StorageEngine) closeAllShards(ctx context.Context) error {
 	e.mtx.RLock()
 	defer e.mtx.RUnlock()
 
-	if releasePools {
-		for _, p := range e.shardPools {
-			p.Release()
-		}
-	}
-
 	for id, sh := range e.shards {
 		if err := sh.Close(ctx); err != nil {
 			e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
@@ -182,70 +172,23 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error {
 	e.blockExec.mtx.RLock()
 	defer e.blockExec.mtx.RUnlock()
 
-	if e.blockExec.err != nil {
-		return e.blockExec.err
+	if e.blockExec.closed {
+		return errClosed
 	}
 
 	return op()
 }
 
-// sets the flag of blocking execution of all data operations according to err:
-// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method
-//   (if err == errClosed => additionally releases pools and does not allow to resume executions).
-// - otherwise, resumes execution. If exec was blocked, calls open method.
-//
-// Can be called concurrently with exec. In this case it waits for all executions to complete.
-func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error {
+func (e *StorageEngine) closeEngine(ctx context.Context) error {
 	e.blockExec.mtx.Lock()
 	defer e.blockExec.mtx.Unlock()
 
-	prevErr := e.blockExec.err
-
-	wasClosed := errors.Is(prevErr, errClosed)
-	if wasClosed {
+	if e.blockExec.closed {
 		return errClosed
 	}
 
-	e.blockExec.err = err
-
-	if err == nil {
-		if prevErr != nil { // block -> ok
-			return e.open(ctx)
-		}
-	} else if prevErr == nil { // ok -> block
-		return e.close(ctx, errors.Is(err, errClosed))
-	}
-
-	// otherwise do nothing
-
-	return nil
-}
-
-// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
-// To resume the execution, use ResumeExecution method.
-//
-// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources
-// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions
-// to complete). Returns error if any Close has been called before.
-//
-// Must not be called concurrently with either Open or Init.
-//
-// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution
-// for this.
-func (e *StorageEngine) BlockExecution(err error) error {
-	return e.setBlockExecErr(context.Background(), err)
-}
-
-// ResumeExecution resumes the execution of any data-related operation.
-// To block the execution, use BlockExecution method.
-//
-// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources
-// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions
-// to complete). Returns error if any Close has been called before.
-//
-// Must not be called concurrently with either Open or Init.
-func (e *StorageEngine) ResumeExecution() error {
-	return e.setBlockExecErr(context.Background(), nil)
+	e.blockExec.closed = true
+	return e.closeAllShards(ctx)
 }
 
 type ReConfiguration struct {
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index c9efc312c..4ff0ed5ec 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -2,7 +2,6 @@ package engine
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"io/fs"
 	"os"
@@ -12,17 +11,14 @@ import (
 	"testing"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	"github.com/stretchr/testify/require"
 	"go.etcd.io/bbolt"
 )
@@ -163,42 +159,6 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
 	require.Equal(t, 1, shardCount)
 }
 
-func TestExecBlocks(t *testing.T) {
-	e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many
-
-	// put some object
-	obj := testutil.GenerateObjectWithCID(cidtest.ID())
-
-	addr := object.AddressOf(obj)
-
-	require.NoError(t, Put(context.Background(), e, obj, false))
-
-	// block executions
-	errBlock := errors.New("block exec err")
-
-	require.NoError(t, e.BlockExecution(errBlock))
-
-	// try to exec some op
-	_, err := Head(context.Background(), e, addr)
-	require.ErrorIs(t, err, errBlock)
-
-	// resume executions
-	require.NoError(t, e.ResumeExecution())
-
-	_, err = Head(context.Background(), e, addr) // can be any data-related op
-	require.NoError(t, err)
-
-	// close
-	require.NoError(t, e.Close(context.Background()))
-
-	// try exec after close
-	_, err = Head(context.Background(), e, addr)
-	require.Error(t, err)
-
-	// try to resume
-	require.Error(t, e.ResumeExecution())
-}
-
 func TestPersistentShardID(t *testing.T) {
 	dir := t.TempDir()
 
@@ -245,7 +205,6 @@ func TestReload(t *testing.T) {
 
 		// no new paths => no new shards
 		require.Equal(t, shardNum, len(e.shards))
-		require.Equal(t, shardNum, len(e.shardPools))
 
 		newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
 
@@ -257,7 +216,6 @@ func TestReload(t *testing.T) {
 		require.NoError(t, e.Reload(context.Background(), rcfg))
 
 		require.Equal(t, shardNum+1, len(e.shards))
-		require.Equal(t, shardNum+1, len(e.shardPools))
 
 		require.NoError(t, e.Close(context.Background()))
 	})
@@ -277,7 +235,6 @@ func TestReload(t *testing.T) {
 
 		// removed one
 		require.Equal(t, shardNum-1, len(e.shards))
-		require.Equal(t, shardNum-1, len(e.shardPools))
 
 		require.NoError(t, e.Close(context.Background()))
 	})
@@ -311,7 +268,6 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
 	}
 
 	require.Equal(t, num, len(e.shards))
-	require.Equal(t, num, len(e.shardPools))
 
 	return e, currShards
 }
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 5e5f65fa2..223cdbc48 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -71,7 +71,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
 	// Removal of a big object is done in multiple stages:
 	// 1. Remove the parent object. If it is locked or already removed, return immediately.
 	// 2. Otherwise, search for all objects with a particular SplitID and delete them too.
-	e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
+	if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
 		var existsPrm shard.ExistsPrm
 		existsPrm.Address = prm.addr
 
@@ -116,20 +116,22 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error {
 
 		// If a parent object is removed we should set GC mark on each shard.
 		return splitInfo == nil
-	})
+	}); err != nil {
+		return err
+	}
 
 	if locked.is {
 		return new(apistatus.ObjectLocked)
 	}
 
 	if splitInfo != nil {
-		e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
+		return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
 	}
 
 	return nil
 }
 
-func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) {
+func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error {
 	var fs objectSDK.SearchFilters
 	fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
 
@@ -142,7 +144,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 		inhumePrm.ForceRemoval()
 	}
 
-	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
+	return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
 		res, err := sh.Select(ctx, selectPrm)
 		if err != nil {
 			e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
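With BlockExecution/ResumeExecution gone, the execution gate degenerates to a single closed flag: data operations take a read lock on it, and Close flips it exactly once under a write lock. A minimal model of that lifecycle (a sketch, not the engine code):

package engineexample

import (
	"errors"
	"sync"
)

var errClosed = errors.New("storage engine is closed")

// gate mirrors the simplified blockExec struct: once closed, every
// subsequent operation and every second close report errClosed.
type gate struct {
	mtx    sync.RWMutex
	closed bool
}

func (g *gate) exec(op func() error) error {
	g.mtx.RLock()
	defer g.mtx.RUnlock()
	if g.closed {
		return errClosed
	}
	return op()
}

func (g *gate) close() error {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	if g.closed {
		return errClosed
	}
	g.closed = true
	return nil
}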
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 85652b3ae..376d545d3 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -12,8 +12,8 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"go.uber.org/zap"
 )
@@ -28,16 +28,13 @@ type StorageEngine struct {
 
 	shards map[string]hashedShard
 
-	shardPools map[string]util.WorkerPool
-
 	closeCh   chan struct{}
 	setModeCh chan setModeRequest
 	wg        sync.WaitGroup
 
 	blockExec struct {
-		mtx sync.RWMutex
-
-		err error
+		mtx    sync.RWMutex
+		closed bool
 	}
 	evacuateLimiter *evacuationLimiter
 }
@@ -176,7 +173,10 @@ func (e *StorageEngine) reportShardError(
 }
 
 func isLogical(err error) bool {
-	return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
+	return errors.As(err, &logicerr.Logical{}) ||
+		errors.Is(err, context.Canceled) ||
+		errors.Is(err, context.DeadlineExceeded) ||
+		errors.As(err, new(*apistatus.ResourceExhausted))
 }
 
 // Option represents StorageEngine's constructor option.
@@ -189,8 +189,6 @@ type cfg struct {
 
 	metrics MetricRegister
 
-	shardPoolSize uint32
-
 	lowMem bool
 
 	containerSource atomic.Pointer[containerSource]
@@ -198,9 +196,8 @@ type cfg struct {
 
 func defaultCfg() *cfg {
 	res := &cfg{
-		log:           logger.NewLoggerWrapper(zap.L()),
-		shardPoolSize: 20,
-		metrics:       noopMetrics{},
+		log:     logger.NewLoggerWrapper(zap.L()),
+		metrics: noopMetrics{},
 	}
 	res.containerSource.Store(&containerSource{})
 	return res
@@ -214,13 +211,18 @@ func New(opts ...Option) *StorageEngine {
 		opts[i](c)
 	}
 
+	evLimMtx := &sync.RWMutex{}
+	evLimCond := sync.NewCond(evLimMtx)
+
 	return &StorageEngine{
-		cfg:             c,
-		shards:          make(map[string]hashedShard),
-		shardPools:      make(map[string]util.WorkerPool),
-		closeCh:         make(chan struct{}),
-		setModeCh:       make(chan setModeRequest),
-		evacuateLimiter: &evacuationLimiter{},
+		cfg:       c,
+		shards:    make(map[string]hashedShard),
+		closeCh:   make(chan struct{}),
+		setModeCh: make(chan setModeRequest),
+		evacuateLimiter: &evacuationLimiter{
+			guard:      evLimMtx,
+			statusCond: evLimCond,
+		},
 	}
 }
 
@@ -237,13 +239,6 @@ func WithMetrics(v MetricRegister) Option {
 	}
 }
 
-// WithShardPoolSize returns option to specify size of worker pool for each shard.
-func WithShardPoolSize(sz uint32) Option {
-	return func(c *cfg) {
-		c.shardPoolSize = sz
-	}
-}
-
 // WithErrorThreshold returns an option to specify size amount of errors after which
 // shard is moved to read-only mode.
 func WithErrorThreshold(sz uint32) Option {
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 926ff43f3..fc6d9ee9c 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -2,9 +2,14 @@ package engine
 
 import (
 	"context"
+	"fmt"
 	"path/filepath"
+	"runtime/debug"
+	"strings"
+	"sync"
 	"testing"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
@@ -55,7 +60,6 @@ func (te *testEngineWrapper) setShardsNumOpts(
 		te.shardIDs[i] = shard.ID()
 	}
 	require.Len(t, te.engine.shards, num)
-	require.Len(t, te.engine.shardPools, num)
 	return te
 }
 
@@ -90,6 +94,7 @@ func testGetDefaultShardOptions(t testing.TB) []shard.Option {
 		),
 		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
 		shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
+		shard.WithLimiter(&testQoSLimiter{t: t}),
 	}
 }
 
@@ -111,7 +116,8 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor
 				blobovniczatree.WithBlobovniczaShallowDepth(1),
 				blobovniczatree.WithBlobovniczaShallowWidth(1),
 				blobovniczatree.WithPermissions(0o700),
-				blobovniczatree.WithLogger(test.NewLogger(t))),
+				blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)),
+				blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))),
 			Policy: func(_ *objectSDK.Object, data []byte) bool {
 				return uint64(len(data)) < smallSize
 			},
@@ -151,3 +157,78 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
 		},
 	}, smallFileStorage, largeFileStorage
 }
+
+var _ qos.Limiter = (*testQoSLimiter)(nil)
+
+type testQoSLimiter struct {
+	t           testing.TB
+	guard       sync.Mutex
+	id          int64
+	readStacks  map[int64][]byte
+	writeStacks map[int64][]byte
+}
+
+func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
+
+func (t *testQoSLimiter) Close() {
+	t.guard.Lock()
+	defer t.guard.Unlock()
+
+	var sb strings.Builder
+	var seqN int
+	for _, stack := range t.readStacks {
+		seqN++
+		sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
+	}
+	for _, stack := range t.writeStacks {
+		seqN++
+		sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
+	}
+	require.True(t.t, seqN == 0, sb.String())
+}
+
+func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
+	t.guard.Lock()
+	defer t.guard.Unlock()
+
+	stack := debug.Stack()
+
+	t.id++
+	id := t.id
+
+	if t.readStacks == nil {
+		t.readStacks = make(map[int64][]byte)
+	}
+	t.readStacks[id] = stack
+
+	return func() {
+		t.guard.Lock()
+		defer t.guard.Unlock()
+
+		delete(t.readStacks, id)
+	}, nil
+}
+
+func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
+	t.guard.Lock()
+	defer t.guard.Unlock()
+
+	stack := debug.Stack()
+
+	t.id++
+	id := t.id
+
+	if t.writeStacks == nil {
+		t.writeStacks = make(map[int64][]byte)
+	}
+	t.writeStacks[id] = stack
+
+	return func() {
+		t.guard.Lock()
+		defer t.guard.Unlock()
+
+		delete(t.writeStacks, id)
+	}, nil
+}
+
+func (t *testQoSLimiter) SetParentID(string) {}
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index d68a7e826..57029dd5f 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -46,7 +46,6 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
 	var testShards [2]*testShard
 
 	te := testNewEngine(t,
-		WithShardPoolSize(1),
 		WithErrorThreshold(errThreshold),
 	).
 		setShardsNumOpts(t, 2, func(id int) []shard.Option {
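testQoSLimiter catches leaked limiter tokens by remembering the goroutine stack of every acquisition until its release callback runs, so a failing test can print where the leak was created. The mechanism reduced to its core, as a standalone sketch:

package qostest

import (
	"fmt"
	"runtime/debug"
	"sync"
)

type tracker struct {
	mu     sync.Mutex
	nextID int64
	live   map[int64][]byte // stacks of acquisitions not yet released
}

// acquire records the caller's stack and returns the matching release.
func (t *tracker) acquire() func() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.live == nil {
		t.live = make(map[int64][]byte)
	}
	t.nextID++
	id := t.nextID
	t.live[id] = debug.Stack()
	return func() {
		t.mu.Lock()
		defer t.mu.Unlock()
		delete(t.live, id)
	}
}

// check fails if any acquisition is still outstanding.
func (t *tracker) check() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if n := len(t.live); n > 0 {
		return fmt.Errorf("%d request(s) not released", n)
	}
	return nil
}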
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 27eaea768..c08dfbf03 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -15,7 +15,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -201,11 +200,6 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
 	return res
 }
 
-type pooledShard struct {
-	hashedShard
-	pool util.WorkerPool
-}
-
 var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
 
 // Evacuate moves data from one shard to the others.
@@ -252,7 +246,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
 	}
 
 	var mtx sync.RWMutex
-	copyShards := func() []pooledShard {
+	copyShards := func() []hashedShard {
 		mtx.RLock()
 		defer mtx.RUnlock()
 		t := slices.Clone(shards)
@@ -266,7 +260,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
 }
 
 func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
-	shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+	shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
 	var err error
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
@@ -388,7 +382,7 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
 }
 
 func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-	shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+	shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
 	egContainer *errgroup.Group, egObject *errgroup.Group,
 ) error {
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
@@ -412,7 +406,7 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel
 }
 
 func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-	shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+	shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
 	egContainer *errgroup.Group, egObject *errgroup.Group,
 ) error {
 	sh := shardsToEvacuate[shardID]
@@ -485,7 +479,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
 }
 
 func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
-	getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+	getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
 	sh := shardsToEvacuate[shardID]
 	shards := getShards()
@@ -515,7 +509,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string,
 }
 
 func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID,
-	prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+	prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
 ) error {
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees",
 		trace.WithAttributes(
@@ -583,7 +577,7 @@ func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.S
 }
 
 func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID,
-	prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+	prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
 ) (bool, string, error) {
 	target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate)
 	if err != nil {
@@ -653,15 +647,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar
 
 // findShardToEvacuateTree returns first shard according HRW or first shard with tree exists.
 func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID,
-	shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
-) (pooledShard, bool, error) {
+	shards []hashedShard, shardsToEvacuate map[string]*shard.Shard,
+) (hashedShard, bool, error) {
 	hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString()))
-	var result pooledShard
+	var result hashedShard
 	var found bool
 	for _, target := range shards {
 		select {
 		case <-ctx.Done():
-			return pooledShard{}, false, ctx.Err()
+			return hashedShard{}, false, ctx.Err()
 		default:
 		}
 
@@ -689,7 +683,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora
 	return result, found, nil
 }
 
-func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) {
+func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) {
 	e.mtx.RLock()
 	defer e.mtx.RUnlock()
 
@@ -719,18 +713,15 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
 	// We must have all shards, to have correct information about their
 	// indexes in a sorted slice and set appropriate marks in the metabase.
 	// Evacuated shard is skipped during put.
-	shards := make([]pooledShard, 0, len(e.shards))
+	shards := make([]hashedShard, 0, len(e.shards))
 	for id := range e.shards {
-		shards = append(shards, pooledShard{
-			hashedShard: e.shards[id],
-			pool:        e.shardPools[id],
-		})
+		shards = append(shards, e.shards[id])
 	}
 	return shards, nil
 }
 
 func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
-	getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
+	getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
 ) error {
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
 	defer span.End()
@@ -800,7 +791,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
 }
 
 func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
-	shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
+	shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
 ) (bool, error) {
 	hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
 	for j := range shards {
@@ -813,7 +804,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
 		if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
 			continue
 		}
-		switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status {
+		switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status {
 		case putToShardSuccess:
 			res.objEvacuated.Add(1)
 			e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard,
&s.startedAt @@ -106,8 +105,7 @@ func (s *EvacuationState) FinishedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.finishedAt == defaultTime { + if s.finishedAt.IsZero() { return nil } return &s.finishedAt @@ -141,7 +139,8 @@ type evacuationLimiter struct { eg *errgroup.Group cancel context.CancelFunc - guard sync.RWMutex + guard *sync.RWMutex + statusCond *sync.Cond // used in unit tests } func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) { @@ -167,6 +166,7 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res startedAt: time.Now().UTC(), result: result, } + l.statusCond.Broadcast() return l.eg, egCtx, nil } @@ -182,6 +182,7 @@ func (l *evacuationLimiter) Complete(err error) { l.state.processState = EvacuateProcessStateCompleted l.state.errMessage = errMsq l.state.finishedAt = time.Now().UTC() + l.statusCond.Broadcast() l.eg = nil } @@ -216,6 +217,7 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error { l.state = EvacuationState{} l.eg = nil l.cancel = nil + l.statusCond.Broadcast() return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index b9d7888e7..f2ba7d994 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -196,7 +196,6 @@ func TestEvacuateShardObjects(t *testing.T) { e.mtx.Lock() delete(e.shards, evacuateShardID) - delete(e.shardPools, evacuateShardID) e.mtx.Unlock() checkHasObjects(t) @@ -205,11 +204,10 @@ func TestEvacuateShardObjects(t *testing.T) { func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { var st *EvacuationState var err error - require.Eventually(t, func() bool { - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err) - return st.ProcessingStatus() == EvacuateProcessStateCompleted - }, 3*time.Second, 10*time.Millisecond) + e.evacuateLimiter.waitForCompleted() + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) return st } @@ -405,8 +403,8 @@ func TestEvacuateSingleProcess(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -447,8 +445,8 @@ func TestEvacuateObjectsAsync(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -475,7 +473,7 @@ func TestEvacuateObjectsAsync(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") - st = testWaitForEvacuationCompleted(t, e) + st := testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") return nil }) @@ -818,3 +816,12 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t 
*testing.T) { t.Logf("evacuate took %v\n", time.Since(start)) require.NoError(t, err) } + +func (l *evacuationLimiter) waitForCompleted() { + l.guard.Lock() + defer l.guard.Unlock() + + for l.state.processState != EvacuateProcessStateCompleted { + l.statusCond.Wait() + } +} diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 9d2b1c1b7..7dac9eb97 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool exists := false locked := false - e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Exists(ctx, shPrm) if err != nil { if client.IsErrObjectAlreadyRemoved(err) { @@ -50,7 +50,9 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } return false - }) + }); err != nil { + return false, false, err + } if alreadyRemoved { return false, false, new(apistatus.ObjectAlreadyRemoved) diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 74c64bbb6..0694c53f3 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -78,7 +78,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return GetRes{}, err + } if it.SplitInfo != nil { return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -97,7 +99,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } - it.tryGetFromBlobstore(ctx) + if err := it.tryGetFromBlobstore(ctx); err != nil { + return GetRes{}, err + } if it.Object == nil { return GetRes{}, it.OutError @@ -133,8 +137,8 @@ type getShardIterator struct { ecInfoErr *objectSDK.ECInfoError } -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.ShardPrm.SetIgnoreMeta(noMeta) @@ -187,13 +191,13 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { }) } -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) { +func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already visited. 
 		return false
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index d6892f129..d436dd411 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -82,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
 	shPrm.SetAddress(prm.addr)
 	shPrm.SetRaw(prm.raw)
 
-	e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
+	if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) {
 		shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold
 		res, err := sh.Head(ctx, shPrm)
 		if err != nil {
@@ -123,7 +123,9 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
 		}
 		head = res.Object()
 		return true
-	})
+	}); err != nil {
+		return HeadRes{}, err
+	}
 
 	if head != nil {
 		return HeadRes{head: head}, nil
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index fb802ef2a..e5f7072e2 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -74,7 +74,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error {
 }
 
 func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
-	addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
+	addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
 	if err != nil {
 		return err
 	}
@@ -84,8 +84,6 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
 		shPrm.ForceRemoval()
 	}
 
-	var errLocked *apistatus.ObjectLocked
-
 	for shardID, addrs := range addrsPerShard {
 		if prm.tombstone != nil {
 			shPrm.SetTarget(*prm.tombstone, addrs...)
@@ -103,39 +101,107 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error {
 		}
 
 		if _, err := sh.Inhume(ctx, shPrm); err != nil {
-			switch {
-			case errors.As(err, &errLocked):
-			case errors.Is(err, shard.ErrLockObjectRemoval):
-			case errors.Is(err, shard.ErrReadOnlyMode):
-			case errors.Is(err, shard.ErrDegradedMode):
-			default:
-				e.reportShardError(ctx, sh, "couldn't inhume object in shard", err)
-			}
+			e.reportInhumeError(ctx, err, sh)
 			return err
 		}
 	}
 
-	return nil
+	return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm)
+}
+
+func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) {
+	if err == nil {
+		return
+	}
+
+	var errLocked *apistatus.ObjectLocked
+	switch {
+	case errors.As(err, &errLocked):
+	case errors.Is(err, shard.ErrLockObjectRemoval):
+	case errors.Is(err, shard.ErrReadOnlyMode):
+	case errors.Is(err, shard.ErrDegradedMode):
+	default:
+		e.reportShardError(ctx, hs, "couldn't inhume object in shard", err)
+	}
+}
+
+// inhumeNotFoundObjects removes objects that are not found on any shard.
+//
+// Even if an object is not found on any shard, it is still important to
+// remove it so that the metabase indexes get populated, because the indexes
+// are responsible for the correct object status: without them the status
+// would be `object not found`, with them it is `object is already removed`.
+//
+// The objects are removed evenly across the shards with the batch size equal
+// to 1 + floor(number of objects / number of shards).
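+// For example, with 10 not-found objects and 4 shards the batch size is
+// 1 + floor(10/4) = 3, so the visited shards inhume 3, 3, 3 and 1 objects
+// respectively.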
+func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error {
+	if len(addrs) == 0 {
+		return nil
+	}
+
+	var shPrm shard.InhumePrm
+	if prm.forceRemoval {
+		shPrm.ForceRemoval()
+	}
+
+	numObjectsPerShard := 1 + len(addrs)/len(e.shards)
+
+	var inhumeErr error
+	itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) {
+		numObjects := min(numObjectsPerShard, len(addrs))
+
+		if numObjects == 0 {
+			return true
+		}
+
+		if prm.tombstone != nil {
+			shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...)
+		} else {
+			shPrm.MarkAsGarbage(addrs[:numObjects]...)
+		}
+		addrs = addrs[numObjects:]
+
+		_, inhumeErr = hs.Inhume(ctx, shPrm)
+		e.reportInhumeError(ctx, inhumeErr, hs)
+		return inhumeErr != nil
+	})
+	if inhumeErr != nil {
+		return inhumeErr
+	}
+	return itErr
+}
 
 // groupObjectsByShard groups objects based on the shard(s) they are stored on.
 //
 // If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
 // the objects are locked.
-func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (map[string][]oid.Address, error) {
-	groups := make(map[string][]oid.Address)
+//
+// Returns two sets of objects: found objects, grouped per shard, and
+// not-found objects, i.e., objects that are not present on any shard. The
+// latter can happen if a node is a container node but doesn't participate in
+// a replica group of the object.
+func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) {
+	groups = make(map[string][]oid.Address)
 
+	var ids []string
 	for _, addr := range addrs {
-		ids, err := e.findShards(ctx, addr, checkLocked)
+		ids, err = e.findShards(ctx, addr, checkLocked)
 		if err != nil {
-			return nil, err
+			return
 		}
+
+		if len(ids) == 0 {
+			notFoundObjects = append(notFoundObjects, addr)
+			continue
+		}
+
 		for _, id := range ids {
 			groups[id] = append(groups[id], addr)
 		}
 	}
 
-	return groups, nil
+	return
 }
 
 // findShards determines the shard(s) where the object is stored.
@@ -158,7 +224,7 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL
 		objectExists bool
 	)
 
-	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
+	if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) {
 		objectExists = false
 
 		prm.Address = addr
@@ -186,10 +252,6 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL
 		default:
 		}
 
-		if !objectExists {
-			return
-		}
-
 		if checkLocked {
 			if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
 				e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
@@ -202,11 +264,20 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL
 			}
 		}
 
+		// This exit point must come after checking if the object is locked,
+		// since the locked index may be populated even if the object doesn't
+		// exist.
+		if !objectExists {
+			return
+		}
+
 		ids = append(ids, sh.ID().String())
 
 		// Continue if it's a root object.
return !isRootObject - }) + }); err != nil { + return nil, err + } if retErr != nil { return nil, retErr @@ -226,8 +297,8 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var err error var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locked, err = h.Shard.IsLocked(ctx, addr) + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { + locked, err = h.IsLocked(ctx, addr) if err != nil { e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) outErr = err @@ -235,7 +306,9 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e } return locked - }) + }); err != nil { + return false, err + } if locked { return locked, nil @@ -255,15 +328,17 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I var allLocks []oid.ID var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locks, err := h.Shard.GetLocks(ctx, addr) + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { + locks, err := h.GetLocks(ctx, addr) if err != nil { e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) outErr = err } allLocks = append(allLocks, locks...) return false - }) + }); err != nil { + return nil, err + } if len(allLocks) > 0 { return allLocks, nil } @@ -271,20 +346,23 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) return true default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) + } } func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredLocks(ctx, epoch, lockers) select { @@ -294,11 +372,13 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) + } } func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleDeletedLocks(ctx, lockers) select { @@ -308,26 +388,25 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) + } } func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerSizePrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { 
case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -339,7 +418,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid var drop []cid.ID for id := range idMap { prm.SetContainerID(id) - s, err := sh.ContainerSize(prm) + s, err := sh.ContainerSize(ctx, prm) if err != nil { e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true @@ -354,13 +433,15 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -378,12 +459,13 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerSize(id.EncodeToString()) } @@ -393,19 +475,16 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerCountPrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -432,13 +511,15 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -456,12 +537,13 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerCount(id.EncodeToString()) } diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 8c5d28b15..0e268cd23 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -11,6 +11,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -205,7 
+206,7 @@ func BenchmarkInhumeMultipart(b *testing.B) {
 func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
 	b.StopTimer()
 
-	engine := testNewEngine(b, WithShardPoolSize(uint32(numObjects))).
+	engine := testNewEngine(b).
 		setShardsNum(b, numShards).prepare(b).engine
 	defer func() { require.NoError(b, engine.Close(context.Background())) }()
 
@@ -242,3 +243,100 @@ func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
 		b.StopTimer()
 	}
 }
+
+func TestInhumeIfObjectDoesntExist(t *testing.T) {
+	const numShards = 4
+
+	engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine
+	t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) })
+
+	t.Run("inhume without tombstone", func(t *testing.T) {
+		testInhumeIfObjectDoesntExist(t, engine, false, false)
+	})
+	t.Run("inhume with tombstone", func(t *testing.T) {
+		testInhumeIfObjectDoesntExist(t, engine, true, false)
+	})
+	t.Run("force inhume", func(t *testing.T) {
+		testInhumeIfObjectDoesntExist(t, engine, false, true)
+	})
+
+	t.Run("object is locked", func(t *testing.T) {
+		t.Run("inhume without tombstone", func(t *testing.T) {
+			testInhumeLockedIfObjectDoesntExist(t, engine, false, false)
+		})
+		t.Run("inhume with tombstone", func(t *testing.T) {
+			testInhumeLockedIfObjectDoesntExist(t, engine, true, false)
+		})
+		t.Run("force inhume", func(t *testing.T) {
+			testInhumeLockedIfObjectDoesntExist(t, engine, false, true)
+		})
+	})
+}
+
+func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
+	t.Parallel()
+
+	object := oidtest.Address()
+	require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce))
+
+	err := testHeadObject(e, object)
+	if withTombstone {
+		require.True(t, client.IsErrObjectAlreadyRemoved(err))
+	} else {
+		require.True(t, client.IsErrObjectNotFound(err))
+	}
+}
+
+func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) {
+	t.Parallel()
+
+	object := oidtest.Address()
+	require.NoError(t, testLockObject(e, object))
+
+	err := testInhumeObject(t, e, object, withTombstone, withForce)
+	if !withForce {
+		var errLocked *apistatus.ObjectLocked
+		require.ErrorAs(t, err, &errLocked)
+		return
+	}
+	require.NoError(t, err)
+
+	err = testHeadObject(e, object)
+	if withTombstone {
+		require.True(t, client.IsErrObjectAlreadyRemoved(err))
+	} else {
+		require.True(t, client.IsErrObjectNotFound(err))
+	}
+}
+
+func testLockObject(e *StorageEngine, obj oid.Address) error {
+	return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()})
+}
+
+func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error {
+	tombstone := oidtest.Address()
+	tombstone.SetContainer(obj.Container())
+
+	// Due to the test design it is possible to set both options; however,
+	// removal with tombstone and force removal are mutually exclusive.
+ require.False(t, withTombstone && withForce) + + var inhumePrm InhumePrm + if withTombstone { + inhumePrm.WithTarget(tombstone, obj) + } else { + inhumePrm.MarkAsGarbage(obj) + } + if withForce { + inhumePrm.WithForceRemoval() + } + return e.Inhume(context.Background(), inhumePrm) +} + +func testHeadObject(e *StorageEngine, obj oid.Address) error { + var headPrm HeadPrm + headPrm.WithAddress(obj) + + _, err := e.Head(context.Background(), headPrm) + return err +} diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 5d43e59df..3b0cf74f9 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -41,11 +41,19 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { + st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: - switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { + st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: @@ -61,13 +69,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { // code is pretty similar to inhumeAddr, maybe unify? 
root := false var addrLocked oid.Address addrLocked.SetContainer(idCnr) addrLocked.SetObject(locked) - e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) { + retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { defer func() { // if object is root we continue since information about it // can be presented in other shards @@ -84,17 +92,11 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError if errors.As(err, &eiErr) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err = objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return false - } - eclocked = append(eclocked, objID) + eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) + if !ok { + return false } + err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), @@ -137,3 +139,18 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo }) return } + +func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { + eclocked := []oid.ID{locked} + for _, chunk := range eiErr.ECInfo().Chunks { + var objID oid.ID + err := objID.ReadFromV2(chunk.ID) + if err != nil { + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) + return nil, false + } + eclocked = append(eclocked, objID) + } + return eclocked, true +} diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 64288a511..10cf5ffd5 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -97,17 +96,19 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } var shRes putToShardRes - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() - pool, ok := e.shardPools[sh.ID().String()] + _, ok := e.shards[sh.ID().String()] e.mtx.RUnlock() if !ok { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer) + shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown - }) + }); err != nil { + return err + } switch shRes.status { case putToShardUnknown: return errPutShard @@ -122,70 +123,59 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // putToShard puts object to sh. 
// Return putToShardStatus and error if it is necessary to propagate an error upper. -func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, +func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, ) (res putToShardRes) { - exitCh := make(chan struct{}) + var existPrm shard.ExistsPrm + existPrm.Address = addr - if err := pool.Submit(func() { - defer close(exitCh) - - var existPrm shard.ExistsPrm - existPrm.Address = addr - - exists, err := sh.Exists(ctx, existPrm) - if err != nil { - if shard.IsErrObjectExpired(err) { - // object is already found but - // expired => do nothing with it - res.status = putToShardExists - } else { - e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - } - - return // this is not ErrAlreadyRemoved error so we can go to the next shard - } - - if exists.Exists() { + exists, err := sh.Exists(ctx, existPrm) + if err != nil { + if shard.IsErrObjectExpired(err) { + // object is already found but + // expired => do nothing with it res.status = putToShardExists - return + } else { + e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) } - var putPrm shard.PutPrm - putPrm.SetObject(obj) - putPrm.SetIndexAttributes(isIndexedContainer) - - _, err = sh.Put(ctx, putPrm) - if err != nil { - if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || - errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - return - } - if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - res.status = putToShardRemoved - res.err = err - return - } - - e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) - return - } - - res.status = putToShardSuccess - }); err != nil { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err)) - close(exitCh) + return // this is not ErrAlreadyRemoved error so we can go to the next shard } - <-exitCh + if exists.Exists() { + res.status = putToShardExists + return + } + + var putPrm shard.PutPrm + putPrm.SetObject(obj) + putPrm.SetIndexAttributes(isIndexedContainer) + + _, err = sh.Put(ctx, putPrm) + if err != nil { + if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || + errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + return + } + if client.IsErrObjectAlreadyRemoved(err) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + res.status = putToShardRemoved + res.err = err + return + } + + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) + return + } + + res.status = putToShardSuccess return } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index a468cf594..7ec4742d8 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -93,7 +93,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error Engine: e, } - 
it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return RngRes{}, err + } if it.SplitInfo != nil { return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -109,7 +111,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } - it.tryGetFromBlobstor(ctx) + if err := it.tryGetFromBlobstor(ctx); err != nil { + return RngRes{}, err + } if it.Object == nil { return RngRes{}, it.OutError @@ -157,8 +161,8 @@ type getRangeShardIterator struct { Engine *StorageEngine } -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.HasDegraded = i.HasDegraded || noMeta i.ShardPrm.SetIgnoreMeta(noMeta) @@ -209,13 +213,13 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { }) } -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) { +func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already processed it without a metabase. return false diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go index 83c6a54ed..a29dd7ed9 100644 --- a/pkg/local_object_storage/engine/rebuild.go +++ b/pkg/local_object_storage/engine/rebuild.go @@ -4,6 +4,7 @@ import ( "context" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" @@ -41,7 +42,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } resGuard := &sync.Mutex{} - limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit) + concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)} eg, egCtx := errgroup.WithContext(ctx) for _, shardID := range prm.ShardIDs { @@ -61,7 +62,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{ - ConcurrencyLimiter: limiter, + ConcurrencyLimiter: concLimiter, TargetFillPercent: prm.TargetFillPercent, }) @@ -88,3 +89,20 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } return res, nil } + +type concurrencyLimiter struct { + semaphore chan struct{} +} + +func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + select { + case l.semaphore <- struct{}{}: + return l.releaseWorkSlot, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (l *concurrencyLimiter) releaseWorkSlot() { + <-l.semaphore +} diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index fc8b4a9a7..4243a5481 100644 --- 
a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -54,14 +54,15 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res = e._select(ctx, prm) - return nil + var sErr error + res, sErr = e._select(ctx, prm) + return sErr }) return } -func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { +func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) @@ -69,7 +70,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -84,11 +85,13 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, - } + }, nil } // List returns `limit` available physically storage object addresses in engine. @@ -98,20 +101,21 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res = e.list(ctx, limit) - return nil + var lErr error + res, lErr = e.list(ctx, limit) + return lErr }) return } -func (e *StorageEngine) list(ctx context.Context, limit uint64) SelectRes { +func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) ln := uint64(0) // consider iterating over shuffled shards - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -130,11 +134,13 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) SelectRes { } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, - } + }, nil } // Select selects objects from local storage using provided filters. 
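The concurrencyLimiter added in rebuild.go above replaces shard.NewRebuildLimiter with a plain buffered-channel semaphore: the channel capacity is the concurrency limit, a send acquires a slot, a receive frees it. A hedged usage sketch; runTask is a hypothetical worker, while AcquireWorkSlot and the returned ReleaseFunc come from the patch:

// runTask blocks until a work slot is free or the context is cancelled,
// then releases the slot when the task finishes.
func runTask(ctx context.Context, l *concurrencyLimiter, task func() error) error {
	release, err := l.AcquireWorkSlot(ctx) // blocks while the semaphore is full
	if err != nil {
		return err // ctx.Err() if cancelled while waiting
	}
	defer release() // frees the slot: <-l.semaphore
	return task()
}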
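A transformation that repeats throughout this patch (get, head, range, select, list, inhume): the shard iteration helpers defined in shards.go below now accept a context and return an error, so callers must separate the iterator's own error, which reports context cancellation, from per-shard business errors captured inside the closure. A self-contained sketch of that two-error pattern with illustrative names:

package main

import (
	"context"
	"errors"
	"fmt"
)

// iterate mirrors StorageEngine.iterateOverUnsortedShards: it stops on
// context cancellation and reports that as its own error; business
// errors are the handler's concern.
func iterate(ctx context.Context, shards []string, handler func(sh string) (stop bool)) error {
	for _, sh := range shards {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if handler(sh) {
			break
		}
	}
	return nil
}

func main() {
	var outErr error
	err := iterate(context.Background(), []string{"s1", "s2"}, func(sh string) bool {
		if sh == "s2" {
			outErr = errors.New("shard failure") // captured business error
			return false                         // keep trying the remaining shards
		}
		return false
	})
	fmt.Println(err, outErr) // <nil> shard failure
}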
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 8e191f72c..69067c500 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -11,10 +11,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/hrw" "github.com/google/uuid" - "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) @@ -116,7 +118,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } - e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) + e.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } @@ -178,11 +180,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { e.mtx.Lock() defer e.mtx.Unlock() - pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true)) - if err != nil { - return fmt.Errorf("create pool: %w", err) - } - strID := sh.ID().String() if _, ok := e.shards[strID]; ok { return fmt.Errorf("shard with id %s was already added", strID) @@ -196,8 +193,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { hash: hrw.StringHash(strID), } - e.shardPools[strID] = pool - return nil } @@ -222,12 +217,6 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { ss = append(ss, sh) delete(e.shards, id) - pool, ok := e.shardPools[id] - if ok { - pool.Release() - delete(e.shardPools, id) - } - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", id)) } @@ -291,20 +280,32 @@ func (e *StorageEngine) unsortedShards() []hashedShard { return shards } -func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { for i, sh := range e.sortShards(addr) { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(i, sh) { break } } + return nil } -func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { for _, sh := range e.unsortedShards() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(sh) { break } } + return nil } // SetShardMode sets mode of the shard with provided identifier. @@ -329,8 +330,6 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M // HandleNewEpoch notifies every shard about NewEpoch event. 
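// The send is non-blocking: if a shard is still processing a previous
// epoch, the notification is skipped and a debug message is logged.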
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { - ev := shard.EventNewEpoch(epoch) - e.mtx.RLock() defer e.mtx.RUnlock() @@ -338,7 +337,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { select { case <-ctx.Done(): return - case sh.NotificationChannel() <- ev: + case sh.NotificationChannel() <- epoch: default: e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) @@ -426,12 +425,6 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha delete(e.shards, idStr) - pool, ok := e.shardPools[idStr] - if ok { - pool.Release() - delete(e.shardPools, idStr) - } - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } @@ -442,3 +435,48 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha func (s hashedShard) Hash() uint64 { return s.hash } + +func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) { + var err error + var info []shard.Info + prm := shard.ExistsPrm{ + Address: obj, + } + var siErr *objectSDK.SplitInfoError + var ecErr *objectSDK.ECInfoError + + if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { + res, exErr := hs.Exists(ctx, prm) + if exErr != nil { + if client.IsErrObjectAlreadyRemoved(exErr) { + err = new(apistatus.ObjectAlreadyRemoved) + return true + } + + // Check if error is either SplitInfoError or ECInfoError. + // True means the object is virtual. + if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) { + info = append(info, hs.DumpInfo()) + return false + } + + if shard.IsErrObjectExpired(exErr) { + err = exErr + return true + } + + if !client.IsErrObjectNotFound(exErr) { + e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address)) + } + + return false + } + if res.Exists() { + info = append(info, hs.DumpInfo()) + } + return false + }); itErr != nil { + return nil, itErr + } + return info, err +} diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index 0bbc7563c..3aa9629b0 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -17,7 +17,6 @@ func TestRemoveShard(t *testing.T) { e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() - require.Equal(t, numOfShards, len(e.shardPools)) require.Equal(t, numOfShards, len(e.shards)) removedNum := numOfShards / 2 @@ -37,7 +36,6 @@ func TestRemoveShard(t *testing.T) { } } - require.Equal(t, numOfShards-removedNum, len(e.shardPools)) require.Equal(t, numOfShards-removedNum, len(e.shards)) for id, removed := range mSh { diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 7f70d36f7..cfd15b4d4 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -230,7 +230,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree } // TreeSortedByFilename implements the pilorama.Forest interface. 
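// The pagination cursor below changes from a raw *string to the richer
// *pilorama.Cursor type.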
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename", trace.WithAttributes( attribute.String("container_id", cid.EncodeToString()), @@ -241,7 +241,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, var err error var nodes []pilorama.MultiNodeInfo - var cursor *string + var cursor *pilorama.Cursor for _, sh := range e.sortShards(cid) { nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) if err != nil { diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go index 383c596af..52b199b0b 100644 --- a/pkg/local_object_storage/internal/testutil/generators.go +++ b/pkg/local_object_storage/internal/testutil/generators.go @@ -1,7 +1,9 @@ package testutil import ( + cryptorand "crypto/rand" "encoding/binary" + "math/rand" "sync/atomic" "testing" @@ -9,7 +11,6 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" - "golang.org/x/exp/rand" ) // AddressGenerator is the interface of types that generate object addresses. @@ -61,7 +62,7 @@ var _ ObjectGenerator = &SeqObjGenerator{} func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object { data := make([]byte, sz) - _, _ = rand.Read(data) + _, _ = cryptorand.Read(data) obj := GenerateObjectWithCIDWithPayload(cid, data) obj.SetID(oid) return obj @@ -82,7 +83,7 @@ var _ ObjectGenerator = &RandObjGenerator{} func (g *RandObjGenerator) Next() *objectSDK.Object { var id oid.ID - _, _ = rand.Read(id[:]) + _, _ = cryptorand.Read(id[:]) return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) } diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go index 60e9211d5..1087e40be 100644 --- a/pkg/local_object_storage/internal/testutil/object.go +++ b/pkg/local_object_storage/internal/testutil/object.go @@ -1,6 +1,7 @@ package testutil import ( + "crypto/rand" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -11,7 +12,6 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" - "golang.org/x/exp/rand" ) const defaultDataSize = 32 diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go new file mode 100644 index 000000000..de1479e6f --- /dev/null +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -0,0 +1,82 @@ +package meta + +import ( + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "go.etcd.io/bbolt" +) + +type bucketCache struct { + locked *bbolt.Bucket + graveyard *bbolt.Bucket + garbage *bbolt.Bucket + expired map[cid.ID]*bbolt.Bucket + primary map[cid.ID]*bbolt.Bucket +} + +func newBucketCache() *bucketCache { + return &bucketCache{} +} + +func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return 
tx.Bucket(bucketNameLocked) + } + return getBucket(&bc.locked, tx, bucketNameLocked) +} + +func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(graveyardBucketName) + } + return getBucket(&bc.graveyard, tx, graveyardBucketName) +} + +func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(garbageBucketName) + } + return getBucket(&bc.garbage, tx, garbageBucketName) +} + +func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket { + if *cache != nil { + return *cache + } + + *cache = tx.Bucket(name) + return *cache +} + +func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = objectToExpirationEpochBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr) +} + +func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = primaryBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr) +} + +func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { + value, ok := (*m)[cnr] + if ok { + return value + } + + if *m == nil { + *m = make(map[cid.ID]*bbolt.Bucket, 1) + } + + bucketName := make([]byte, bucketKeySize) + bucketName = nameFunc(cnr, bucketName) + (*m)[cnr] = getBucket(&value, tx, bucketName) + return value +} diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index d338e228f..9a5a6e574 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -363,12 +363,12 @@ func (db *DB) deleteObject( func parentLength(tx *bbolt.Tx, addr oid.Address) int { bucketName := make([]byte, bucketKeySize) - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:])) + bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) if bkt == nil { return 0 } - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:]))) + lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) if err != nil { return 0 } @@ -376,11 +376,12 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int { return len(lst) } -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) { +func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt := tx.Bucket(item.name) if bkt != nil { - _ = bkt.Delete(item.key) // ignore error, best effort there + return bkt.Delete(item.key) } + return nil } func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -405,19 +406,16 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { // if list empty, remove the key from bucket if len(lst) == 0 { - _ = bkt.Delete(item.key) // ignore error, best effort there - - return nil + return bkt.Delete(item.key) } // if list is not empty, then update it encodedLst, err := encodeList(lst) if err != nil { - return nil // ignore error, best effort there + return err } - _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there - return nil + return bkt.Put(item.key, encodedLst) } func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -480,35 +478,47 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) 
error return ErrUnknownObjectType } - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: bucketName, key: objKey, - }) + }); err != nil { + return err + } } else { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: parentBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } } - delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index name: smallBucketName(cnr, bucketName), key: objKey, - }) - delUniqueIndexItem(tx, namedBucketItem{ // remove from root index + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index name: rootBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } if expEpoch, ok := hasExpirationEpoch(obj); ok { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: expEpochToObjectBucketName, key: expirationEpochKey(expEpoch, cnr, addr.Object()), - }) - delUniqueIndexItem(tx, namedBucketItem{ + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), key: objKey, - }) + }); err != nil { + return err + } } return nil @@ -535,10 +545,12 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. // also drop EC parent root info if current EC chunk is the last one if !hasAnyChunks { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }) + }); err != nil { + return err + } } if ech.ParentSplitParentID() == nil { @@ -572,11 +584,10 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. } // drop split info - delUniqueIndexItem(tx, namedBucketItem{ + return delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), }) - return nil } func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 962108a76..7bd6f90a6 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -153,12 +153,16 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE // - 2 if object is covered with tombstone; // - 3 if object is expired. 
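// objectStatusWithCache below is the cache-aware variant: frequently used
// bucket handles are resolved at most once per transaction via bucketCache,
// and a nil cache falls back to plain tx.Bucket lookups.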
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { + return objectStatusWithCache(nil, tx, addr, currEpoch) +} + +func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { // locked object could not be removed/marked with GC/expired - if objectLocked(tx, addr.Container(), addr.Object()) { + if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) { return 0, nil } - expired, err := isExpired(tx, addr, currEpoch) + expired, err := isExpiredWithCache(bc, tx, addr, currEpoch) if err != nil { return 0, err } @@ -167,8 +171,8 @@ func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, erro return 3, nil } - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + graveyardBkt := getGraveyardBucket(bc, tx) + garbageBkt := getGarbageBucket(bc, tx) addrKey := addressKey(addr, make([]byte, addressKeySize)) return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil } diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go index 68144d8b1..a1351cb6f 100644 --- a/pkg/local_object_storage/metabase/expired.go +++ b/pkg/local_object_storage/metabase/expired.go @@ -74,9 +74,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A } func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - bucketName := make([]byte, bucketKeySize) - bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName) - b := tx.Bucket(bucketName) + return isExpiredWithCache(nil, tx, addr, currEpoch) +} + +func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { + b := getExpiredBucket(bc, tx, addr.Container()) if b == nil { return false, nil } diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 615add1af..821810c09 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -88,8 +88,12 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { } func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { + return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch) +} + +func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { if checkStatus { - st, err := objectStatus(tx, addr, currEpoch) + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) if err != nil { return nil, err } @@ -109,12 +113,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b bucketName := make([]byte, bucketKeySize) // check in primary index - data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(data) + if b := getPrimaryBucket(bc, tx, cnr); b != nil { + if data := b.Get(key); len(data) != 0 { + return obj, obj.Unmarshal(data) + } } - data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) + data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) if len(data) != 0 { return nil, getECInfoError(tx, cnr, data) } diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index eaef3b9ba..2a0bd7f6a 100644 --- a/pkg/local_object_storage/metabase/list.go +++ 
b/pkg/local_object_storage/metabase/list.go @@ -139,8 +139,7 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, var containerID cid.ID var offset []byte - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + bc := newBucketCache() rawAddr := make([]byte, cidSize, addressKeySize) @@ -169,7 +168,7 @@ loop: bkt := tx.Bucket(name) if bkt != nil { copy(rawAddr, cidRaw) - result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID, + result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID, result, count, cursor, threshold, currEpoch) if err != nil { return nil, nil, err @@ -204,9 +203,10 @@ loop: // selectNFromBucket similar to selectAllFromBucket but uses cursor to find // object to start selecting from. Ignores inhumed objects. -func selectNFromBucket(bkt *bbolt.Bucket, // main bucket +func selectNFromBucket( + bc *bucketCache, + bkt *bbolt.Bucket, // main bucket objType objectSDK.Type, // type of the objects stored in the main bucket - graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets cidRaw []byte, // container ID prefix, optimization cnt cid.ID, // container ID to []objectcore.Info, // listing result @@ -219,7 +219,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket cursor = new(Cursor) } - count := len(to) c := bkt.Cursor() k, v := c.First() @@ -231,7 +230,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } for ; k != nil; k, v = c.Next() { - if count >= limit { + if len(to) >= limit { break } @@ -241,6 +240,8 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } offset = k + graveyardBkt := getGraveyardBucket(bc, bkt.Tx()) + garbageBkt := getGarbageBucket(bc, bkt.Tx()) if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { continue } @@ -251,7 +252,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } expEpoch, hasExpEpoch := hasExpirationEpoch(&o) - if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch { + if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) { continue } @@ -273,7 +274,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket a.SetContainer(cnt) a.SetObject(obj) to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}) - count++ } return to, offset, cursor, nil diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 817b22010..02985991c 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -59,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { - if errors.Is(err, meta.ErrEndOfListing) { + if !errors.Is(err, meta.ErrEndOfListing) { b.Fatalf("error: %v", err) } prm.SetCursor(nil) diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index b930a0141..f4cb9e53b 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -4,8 +4,10 @@ import ( "bytes" "context" "fmt" + "slices" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" 
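The one-character benchmark fix above is easy to miss but inverts the logic: the old code aborted on the ErrEndOfListing sentinel and silently reset the cursor on real failures. A sketch of the intended control flow; drainAll is illustrative, and res.Cursor() is assumed from how the cursor round-trips elsewhere in this diff:

func drainAll(ctx context.Context, db *meta.DB, prm meta.ListPrm) error {
	for {
		res, err := db.ListWithCursor(ctx, prm)
		if err != nil {
			if !errors.Is(err, meta.ErrEndOfListing) {
				return err // real failure
			}
			return nil // listing exhausted
		}
		prm.SetCursor(res.Cursor())
	}
}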
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -62,9 +64,7 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid. return ErrReadOnlyMode } - if len(locked) == 0 { - panic("empty locked list") - } + assert.False(len(locked) == 0, "empty locked list") err := db.lockInternal(locked, cnr, locker) success = err == nil @@ -162,7 +162,11 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { // checks if specified object is locked in the specified container. func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - bucketLocked := tx.Bucket(bucketNameLocked) + return objectLockedWithCache(nil, tx, idCnr, idObj) +} + +func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { + bucketLocked := getLockedBucket(bc, tx) if bucketLocked != nil { key := make([]byte, cidSize) idCnr.Encode(key) @@ -250,7 +254,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres unlockedObjects = append(unlockedObjects, addr) } else { // exclude locker - keyLockers = append(keyLockers[:i], keyLockers[i+1:]...) + keyLockers = slices.Delete(keyLockers, i, i+1) v, err = encodeList(keyLockers) if err != nil { diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 45faecc13..5f0956f0b 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -37,7 +37,7 @@ func TestResetDropsContainerBuckets(t *testing.T) { for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) - putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) + putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx)) _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) } diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 9f1b8b060..60da50671 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -131,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters res := make([]oid.Address, 0, len(mAddr)) + bc := newBucketCache() for a, ind := range mAddr { if ind != expLen { continue // ignore objects with unmatched fast filters @@ -145,7 +146,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters var addr oid.Address addr.SetContainer(cnr) addr.SetObject(id) - st, err := objectStatus(tx, addr, currEpoch) + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) if err != nil { return nil, err } @@ -153,7 +154,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters continue // ignore removed objects } - addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) + addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch) if !match { continue // ignore objects with unmatched slow filters } @@ -451,13 +452,13 @@ func (db *DB) selectObjectID( } // matchSlowFilters return true if object header is matched by all slow filters. 
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { +func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { result := addr if len(f) == 0 { return result, true } - obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch) + obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch) if err != nil { return result, false } @@ -515,9 +516,9 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc return result, true } -func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { +func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { buf := make([]byte, addressKeySize) - obj, err := db.get(tx, addr, buf, true, false, currEpoch) + obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch) if err != nil { var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { @@ -527,7 +528,7 @@ func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch continue } addr.SetObject(objID) - obj, err = db.get(tx, addr, buf, true, false, currEpoch) + obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch) if err == nil { return obj, true, nil } diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 5cc998311..ce2156d2e 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -1216,6 +1216,8 @@ func TestExpiredObjects(t *testing.T) { } func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) { + b.ReportAllocs() + var prm meta.SelectPrm prm.SetContainerID(cid) prm.SetFilters(fs) diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index 5444264be..c90de4dd6 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -45,7 +45,7 @@ func TestUpgradeV2ToV3(t *testing.T) { type testContainerInfoProvider struct{} -func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) { +func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) { return container.Info{}, nil } diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 80851f1c4..4ad83332b 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -278,9 +279,7 @@ func objectKey(obj oid.ID, key []byte) []byte { // // firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. 
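This diff replaces two bare panics with assert.False (Lock above, firstIrregularObjectType below). The internal/assert package itself is not part of the diff; judging only from those call sites, a minimal sketch of what it might look like (the real implementation may differ):

package assert

import "strings"

// False panics with the given details when cond is true.
func False(cond bool, details ...string) {
	if cond {
		panic(strings.Join(details, " "))
	}
}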
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - if len(objs) == 0 { - panic("empty object list in firstIrregularObjectType") - } + assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") var keys [2][1 + cidSize]byte diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 9d71d9fda..897b37ea0 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1077,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol } // TreeSortedByFilename implements the Forest interface. -func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) { +func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { var ( startedAt = time.Now() success = false @@ -1155,7 +1155,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = &s + last = NewCursor(s, res[len(res)-1].LastChild()) } return res, last, metaerr.Wrap(err) } @@ -1166,10 +1166,10 @@ func sortByFilename(nodes []NodeInfo) { }) } -func sortAndCut(result []NodeInfo, last *string) []NodeInfo { +func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo { var lastBytes []byte if last != nil { - lastBytes = []byte(*last) + lastBytes = []byte(last.GetFilename()) } sortByFilename(result) @@ -1582,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error { func (t *boltForest) logFromBytes(lm *Move, data []byte) error { lm.Child = binary.LittleEndian.Uint64(data) lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.Meta.FromBytes(data[16:]) + return lm.FromBytes(data[16:]) } func (t *boltForest) logToBytes(lm *Move) []byte { w := io.NewBufBinWriter() - size := 8 + 8 + lm.Meta.Size() + 1 + size := 8 + 8 + lm.Size() + 1 // if lm.HasOld { // size += 8 + lm.Old.Meta.Size() // } @@ -1595,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte { w.Grow(size) w.WriteU64LE(lm.Child) w.WriteU64LE(lm.Parent) - lm.Meta.EncodeBinary(w.BinWriter) + lm.EncodeBinary(w.BinWriter) // w.WriteBool(lm.HasOld) // if lm.HasOld { // w.WriteU64LE(lm.Old.Parent) diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index 92183716c..ebfd0bcc0 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -164,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, } // TreeSortedByFilename implements the Forest interface. 
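TreeSortedByFilename now pages with an opaque *Cursor instead of a bare *string, so runs of equal filenames can resume mid-MultiNode (see the Cursor type and the heap change further below). A plausible drain loop over the Forest interface; stopping on an empty page is an assumption here, the tests drive a fixed number of calls instead:

func listAllSorted(ctx context.Context, f pilorama.Forest, cnr cidSDK.ID, treeID string) ([]pilorama.MultiNodeInfo, error) {
	var (
		all  []pilorama.MultiNodeInfo
		last *pilorama.Cursor // nil starts from the beginning
	)
	for {
		page, next, err := f.TreeSortedByFilename(ctx, cnr, treeID, pilorama.MultiNode{pilorama.RootID}, last, 100)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 { // nothing more to read
			return all, nil
		}
		all = append(all, page...)
		last = next
	}
}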
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) { +func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { @@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI var res []NodeInfo for _, nodeID := range nodeIDs { - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) for _, childID := range children { var found bool for _, kv := range s.infoMap[childID].Meta.Items { @@ -204,17 +204,14 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI r := mergeNodeInfos(res) for i := range r { - if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start { - finish := i + count - if len(res) < finish { - finish = len(res) - } + if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { + finish := min(len(res), i+count) last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], &last, nil + return r[i:finish], NewCursor(last, 0), nil } } last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, &last, nil + return nil, NewCursor(last, 0), nil } // TreeGetChildren implements the Forest interface. @@ -225,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str return nil, ErrTreeNotFound } - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) res := make([]NodeInfo, 0, len(children)) for _, childID := range children { res = append(res, NodeInfo{ diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index de56fc82b..844084c55 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) @@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go index 5a00bcf7a..b035be1e1 100644 --- a/pkg/local_object_storage/pilorama/heap.go +++ b/pkg/local_object_storage/pilorama/heap.go @@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any { // fixedHeap maintains a fixed number of smallest elements started at some point. 
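The push change in heap.go below handles the case the old string cursor could not: one filename mapping to more nodes than a single batch holds. The cursor records the last node ID already returned, and the next call keeps only the suffix after it. The core rule in isolation, with node IDs as plain uint64s:

import "slices"

// resumeTail returns the not-yet-returned suffix of ids after lastNode,
// or ok=false when the previous batch already consumed the whole run.
func resumeTail(ids []uint64, lastNode uint64) (tail []uint64, ok bool) {
	pos := slices.Index(ids, lastNode)
	if pos == -1 || pos+1 >= len(ids) {
		return nil, false
	}
	return ids[pos+1:], true
}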
type fixedHeap struct { - start *string + start *Cursor sorted bool count int h *filenameHeap } -func newHeap(start *string, count int) *fixedHeap { +func newHeap(start *Cursor, count int) *fixedHeap { h := new(filenameHeap) heap.Init(h) @@ -50,8 +50,19 @@ func newHeap(start *string, count int) *fixedHeap { const amortizationMultiplier = 5 func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil && filename <= *h.start { - return false + if h.start != nil { + if filename < h.start.GetFilename() { + return false + } else if filename == h.start.GetFilename() { + // A tree may have a lot of nodes with the same filename but different versions so that + // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call + // with the same filename. + pos := slices.Index(id, h.start.GetNode()) + if pos == -1 || pos+1 >= len(id) { + return false + } + id = id[pos+1:] + } } *h.h = append(*h.h, heapInfo{id: id, filename: filename}) diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index ce7b3db1e..28b7faec8 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree { // undo un-does op and changes s in-place. func (s *memoryTree) undo(op *move) { if op.HasOld { - s.tree.infoMap[op.Child] = op.Old + s.infoMap[op.Child] = op.Old } else { - delete(s.tree.infoMap, op.Child) + delete(s.infoMap, op.Child) } } @@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move { }, } - shouldPut := !s.tree.isAncestor(op.Child, op.Parent) - p, ok := s.tree.infoMap[op.Child] + shouldPut := !s.isAncestor(op.Child, op.Parent) + p, ok := s.infoMap[op.Child] if ok { lm.HasOld = true lm.Old = p @@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move { p.Meta = m p.Parent = op.Parent - s.tree.infoMap[op.Child] = p + s.infoMap[op.Child] = p return lm } @@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { } var nodes []Node - var lastTs Timestamp + var lastTS Timestamp children := t.getChildren(curNode) for i := range children { @@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { fileName := string(info.Meta.GetAttr(attr)) if fileName == path[len(path)-1] { if latest { - if info.Meta.Time >= lastTs { + if info.Meta.Time >= lastTS { nodes = append(nodes[:0], children[i]) } } else { diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 1f7e742a2..e1f6cd8e7 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -37,7 +37,7 @@ type Forest interface { TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. - TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) + TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) // TreeGetOpLog returns first log operation stored at or above the height. 
// In case no such operation is found, empty Move and nil error should be returned. TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) @@ -79,6 +79,38 @@ const ( AttributeVersion = "Version" ) +// Cursor keeps state between function calls for traversing nodes. +// It stores the attributes associated with a previous call, allowing subsequent operations +// to resume traversal from this point rather than starting from the beginning. +type Cursor struct { + // Last traversed filename. + filename string + + // Last traversed node. + node Node +} + +func NewCursor(filename string, node Node) *Cursor { + return &Cursor{ + filename: filename, + node: node, + } +} + +func (c *Cursor) GetFilename() string { + if c == nil { + return "" + } + return c.filename +} + +func (c *Cursor) GetNode() Node { + if c == nil { + return Node(0) + } + return c.node +} + // CIDDescriptor contains container ID and information about the node position // in the list of container nodes. type CIDDescriptor struct { diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go index 106ba6ae9..36d347f10 100644 --- a/pkg/local_object_storage/pilorama/multinode.go +++ b/pkg/local_object_storage/pilorama/multinode.go @@ -25,6 +25,10 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool { return true } +func (r *MultiNodeInfo) LastChild() Node { + return r.Children[len(r.Children)-1] +} + func (n NodeInfo) ToMultiNode() MultiNodeInfo { return MultiNodeInfo{ Children: MultiNode{n.ID}, diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go index 54c2b90a6..eecee1527 100644 --- a/pkg/local_object_storage/pilorama/split_test.go +++ b/pkg/local_object_storage/pilorama/split_test.go @@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) { require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4")) require.Equal(t, []byte{10}, testGetByPath(t, "value0")) - testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) { + testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) { res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize) require.NoError(t, err) return res, last diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index 0309f0c81..b4015ae8d 100644 --- a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 { return r.size } -func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { +func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { s.m.RLock() defer s.m.RUnlock() @@ -34,6 +34,12 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { return ContainerSizeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerSizeRes{}, err + } + defer release() + size, err := s.metaBase.ContainerSize(prm.cnr) if err != nil { return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) @@ -69,6 +75,12 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont return ContainerCountRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 
ContainerCountRes{}, err + } + defer release() + counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) if err != nil { return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) @@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerSize(ctx, id) } @@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerCount(ctx, id) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index fedde2206..d489b8b0d 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -108,19 +108,17 @@ func (s *Shard) Init(ctx context.Context) error { s.updateMetrics(ctx) s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - eventChan: make(chan Event), - mEventHandler: map[eventType]*eventHandlers{ - eventNewEpoch: { - cancelFunc: func() {}, - handlers: []eventHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, - }, + gcCfg: &s.gcCfg, + remover: s.removeGarbage, + stopChannel: make(chan struct{}), + newEpochChan: make(chan uint64), + newEpochHandlers: &newEpochHandlers{ + cancelFunc: func() {}, + handlers: []newEpochHandler{ + s.collectExpiredLocks, + s.collectExpiredObjects, + s.collectExpiredTombstones, + s.collectExpiredMetrics, }, }, } @@ -216,8 +214,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - if s.cfg.refillMetabaseWorkersCount > 0 { - eg.SetLimit(s.cfg.refillMetabaseWorkersCount) + if s.refillMetabaseWorkersCount > 0 { + eg.SetLimit(s.refillMetabaseWorkersCount) } var completedCount uint64 @@ -365,6 +363,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object // Close releases all Shard's components. func (s *Shard) Close(ctx context.Context) error { + unlock := s.lockExclusive() if s.rb != nil { s.rb.Stop(ctx, s.log) } @@ -390,6 +389,14 @@ func (s *Shard) Close(ctx context.Context) error { } } + if s.opsLimiter != nil { + s.opsLimiter.Close() + } + + unlock() + + // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock. + // So to prevent deadlock GC stopping is outside of exclusive lock. // If Init/Open was unsuccessful gc can be nil. 
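From this point on, nearly every shard operation in the diff gains the same guard: acquire a read or write slot from opsLimiter before touching the metabase, and release it on every exit path. Reduced to its skeleton (the method name and the metabase call are illustrative):

func (s *Shard) guardedRead(ctx context.Context) (uint64, error) {
	release, err := s.opsLimiter.ReadRequest(ctx) // may block or fail per qos policy
	if err != nil {
		return 0, err
	}
	defer release() // runs however the guarded call exits

	return s.metaBase.SomeCount(ctx) // hypothetical metabase access
}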
if s.gc != nil { s.gc.stop(ctx) @@ -445,6 +452,10 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { return err } } + if c.opsLimiter != nil { + s.opsLimiter.Close() + s.opsLimiter = c.opsLimiter + } return s.setMode(ctx, c.info.Mode) } diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go index b3bc6a30b..8dc1f0522 100644 --- a/pkg/local_object_storage/shard/count.go +++ b/pkg/local_object_storage/shard/count.go @@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + cc, err := s.metaBase.ObjectCounters() if err != nil { return 0, err diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index 55231b032..0101817a8 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -54,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del return DeleteRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return DeleteRes{}, err + } + defer release() + result := DeleteRes{} for _, addr := range prm.addr { select { diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 82ce48dde..2c11b6b01 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -53,10 +53,6 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { )) defer span.End() - var exists bool - var locked bool - var err error - s.m.RLock() defer s.m.RUnlock() @@ -64,7 +60,18 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { return ExistsRes{}, ErrShardDisabled } else if s.info.EvacuationInProgress { return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } else if s.info.Mode.NoMetabase() { + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ExistsRes{}, err + } + defer release() + + var exists bool + var locked bool + + if s.info.Mode.NoMetabase() { var p common.ExistsPrm p.Address = prm.Address diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 1b218a372..a262a52cb 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -6,11 +6,13 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -31,41 +33,14 @@ type TombstoneSource interface { IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool } -// Event represents class of external events. 
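The gc.go refactor below drops the Event interface entirely: new-epoch was the only event class, so a chan uint64 plus a slice of epoch handlers carries the same information with less machinery. Stripped of the worker pool, cancellation, and wait group the real code keeps, the shape is roughly:

type epochGC struct { // illustrative name
	newEpochChan chan uint64
	handlers     []func(context.Context, uint64)
}

func (g *epochGC) listen(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case epoch := <-g.newEpochChan:
			for _, h := range g.handlers {
				h(ctx, epoch)
			}
		}
	}
}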
-type Event interface { - typ() eventType -} +type newEpochHandler func(context.Context, uint64) -type eventType int - -const ( - _ eventType = iota - eventNewEpoch -) - -type newEpoch struct { - epoch uint64 -} - -func (e newEpoch) typ() eventType { - return eventNewEpoch -} - -// EventNewEpoch returns new epoch event. -func EventNewEpoch(e uint64) Event { - return newEpoch{ - epoch: e, - } -} - -type eventHandler func(context.Context, Event) - -type eventHandlers struct { +type newEpochHandlers struct { prevGroup sync.WaitGroup cancelFunc context.CancelFunc - handlers []eventHandler + handlers []newEpochHandler } type gcRunResult struct { @@ -107,10 +82,10 @@ type gc struct { remover func(context.Context) gcRunResult - // eventChan is used only for listening for the new epoch event. + // newEpochChan is used only for listening for the new epoch event. // It is ok to keep opened, we are listening for context done when writing in it. - eventChan chan Event - mEventHandler map[eventType]*eventHandlers + newEpochChan chan uint64 + newEpochHandlers *newEpochHandlers } type gcCfg struct { @@ -140,16 +115,8 @@ func defaultGCCfg() gcCfg { } func (gc *gc) init(ctx context.Context) { - sz := 0 - - for _, v := range gc.mEventHandler { - sz += len(v.handlers) - } - - if sz > 0 { - gc.workerPool = gc.workerPoolInit(sz) - } - + gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) gc.wg.Add(2) go gc.tickRemover(ctx) go gc.listenEvents(ctx) @@ -166,7 +133,7 @@ func (gc *gc) listenEvents(ctx context.Context) { case <-ctx.Done(): gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return - case event, ok := <-gc.eventChan: + case event, ok := <-gc.newEpochChan: if !ok { gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return @@ -177,38 +144,33 @@ func (gc *gc) listenEvents(ctx context.Context) { } } -func (gc *gc) handleEvent(ctx context.Context, event Event) { - v, ok := gc.mEventHandler[event.typ()] - if !ok { - return - } - - v.cancelFunc() - v.prevGroup.Wait() +func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() var runCtx context.Context - runCtx, v.cancelFunc = context.WithCancel(ctx) + runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) - v.prevGroup.Add(len(v.handlers)) + gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) - for i := range v.handlers { + for i := range gc.newEpochHandlers.handlers { select { case <-ctx.Done(): return default: } - h := v.handlers[i] + h := gc.newEpochHandlers.handlers[i] err := gc.workerPool.Submit(func() { - defer v.prevGroup.Done() - h(runCtx, event) + defer gc.newEpochHandlers.prevGroup.Done() + h(runCtx, epoch) }) if err != nil { gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.Error(err), ) - v.prevGroup.Done() + gc.newEpochHandlers.prevGroup.Done() } } } @@ -265,6 +227,9 @@ func (gc *gc) stop(ctx context.Context) { gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() + + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() } // iterates over metabase and deletes objects @@ -289,28 +254,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) - buf := make([]oid.Address, 0, s.rmBatchSize) - - var iterPrm meta.GarbageIterationPrm - iterPrm.SetHandler(func(g 
meta.GarbageObject) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - buf = append(buf, g.Address()) - - if len(buf) == s.rmBatchSize { - return meta.ErrInterruptIterator - } - - return nil - }) - - // iterate over metabase's objects with GC mark - // (no more than s.rmBatchSize objects) - err := s.metaBase.IterateOverGarbage(ctx, iterPrm) + buf, err := s.getGarbage(ctx) if err != nil { s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, zap.Error(err), @@ -342,13 +286,46 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return } +func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + + buf := make([]oid.Address, 0, s.rmBatchSize) + + var iterPrm meta.GarbageIterationPrm + iterPrm.SetHandler(func(g meta.GarbageObject) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + buf = append(buf, g.Address()) + + if len(buf) == s.rmBatchSize { + return meta.ErrInterruptIterator + } + + return nil + }) + + if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil { + return nil, err + } + + return buf, nil +} + func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) + workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) + batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) return } -func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { +func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -356,8 +333,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -366,7 +343,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { batch = append(batch, o.Address()) @@ -414,24 +391,25 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } + s.handleExpiredObjectsUnsafe(ctx, expired) +} + +func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { + select { + case <-ctx.Done(): + return + default: + } + expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) return } - var inhumePrm meta.InhumePrm - - inhumePrm.SetAddresses(expired...) 
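getGarbage above preserves the existing batching trick: the iteration callback returns meta.ErrInterruptIterator once the buffer reaches rmBatchSize, and the metabase treats that as a clean early stop. The same pattern in general form; collectBatch and its iterate parameter are illustrative, and unlike IterateOverGarbage (which swallows the sentinel itself) this sketch filters it explicitly:

func collectBatch(ctx context.Context, iterate func(func(oid.Address) error) error, limit int) ([]oid.Address, error) {
	buf := make([]oid.Address, 0, limit)
	err := iterate(func(a oid.Address) error {
		if err := ctx.Err(); err != nil {
			return err
		}
		buf = append(buf, a)
		if len(buf) == limit {
			return meta.ErrInterruptIterator // early stop, not a failure
		}
		return nil
	})
	if err != nil && !errors.Is(err, meta.ErrInterruptIterator) {
		return nil, err
	}
	return buf, nil
}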
- inhumePrm.SetGCMark() - - // inhume the collected objects - res, err := s.metaBase.Inhume(ctx, inhumePrm) + res, err := s.inhumeGC(ctx, expired) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err)) return } @@ -449,6 +427,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) } func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + result := make([]oid.Address, 0, len(source)) parentToChildren, err := s.metaBase.GetChildren(ctx, source) if err != nil { @@ -462,7 +446,20 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) return result, nil } -func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { +func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) { + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return meta.InhumeRes{}, err + } + defer release() + + var inhumePrm meta.InhumePrm + inhumePrm.SetAddresses(addrs...) + inhumePrm.SetGCMark() + return s.metaBase.Inhume(ctx, inhumePrm) +} + +func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -470,7 +467,6 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) }() - epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) @@ -503,11 +499,18 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { return } - err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + var release qos.ReleaseFunc + release, err = s.opsLimiter.ReadRequest(ctx) + if err != nil { + log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) + s.m.RUnlock() + return + } + err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + release() if err != nil { log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() - return } @@ -535,7 +538,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } -func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { +func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -543,8 +546,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -554,14 +557,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if 
o.Type() == objectSDK.TypeLock { batch = append(batch, o.Address()) if len(batch) == batchSize { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) batch = make([]oid.Address, 0, batchSize) @@ -575,7 +578,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { if len(batch) > 0 { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) } @@ -596,7 +599,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo return ErrDegradedMode } - err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { select { case <-ctx.Done(): return meta.ErrInterruptIterator @@ -612,12 +621,11 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo } func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err } + defer release() return s.metaBase.FilterExpired(ctx, epoch, addresses) } @@ -634,12 +642,15 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston return } - res, err := s.metaBase.InhumeTombstones(ctx, tss) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.InhumeTombstones(ctx, tss) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) return } @@ -659,14 +670,22 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // HandleExpiredLocks unlocks all objects which were locked by lockers. // If successful, marks lockers themselves as garbage. func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return + } + + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } unlocked, err := s.metaBase.FreeLockedBy(lockers) + release() if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.Error(err), - ) + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } @@ -674,13 +693,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] var pInhume meta.InhumePrm pInhume.SetAddresses(lockers...) 
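HandleExpiredTombstones above and HandleExpiredLocks around this hunk share one detail worth noting: when more work follows the guarded call, release() is invoked explicitly right after the metabase returns rather than deferred, so the limiter slot is not held through post-processing. The shape, with SomeQuery and process as stand-ins:

func (s *Shard) queryThenProcess(ctx context.Context) error {
	release, err := s.opsLimiter.ReadRequest(ctx)
	if err != nil {
		return err
	}
	res, err := s.metaBase.SomeQuery(ctx) // hypothetical guarded call
	release()                             // slot freed before post-processing
	if err != nil {
		return err
	}
	return s.process(res) // work that must not hold the slot
}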
pInhume.SetForceGCMark() - - res, err := s.metaBase.Inhume(ctx, pInhume) + release, err = s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.Inhume(ctx, pInhume) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) return } @@ -710,36 +731,40 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc return } - s.handleExpiredObjects(ctx, expiredUnlocked) + s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) } // HandleDeletedLocks unlocks all objects which were locked by lockers. func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } - _, err := s.metaBase.FreeLockedBy(lockers) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + return + } + _, err = s.metaBase.FreeLockedBy(lockers) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } } -// NotificationChannel returns channel for shard events. -func (s *Shard) NotificationChannel() chan<- Event { - return s.gc.eventChan +// NotificationChannel returns channel for new epoch events. +func (s *Shard) NotificationChannel() chan<- uint64 { + return s.gc.newEpochChan } -func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { +func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") defer span.End() - epoch := e.(newEpoch).epoch - s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) @@ -748,7 +773,13 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { } func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } ids, err := s.metaBase.ZeroSizeContainers(ctx) + release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return @@ -760,7 +791,13 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui } func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } ids, err := s.metaBase.ZeroCountContainers(ctx) + release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 9998bbae2..54d2f1510 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -37,7 +37,8 @@ func 
Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index e3670b441..f512a488a 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { require.NoError(t, err) epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) var getPrm GetPrm getPrm.SetAddress(objectCore.AddressOf(obj)) @@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { require.True(t, errors.As(err, &splitInfoError), "split info must be provided") epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 05823c62b..28f8912be 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -111,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { return c.Get(ctx, prm.addr) } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return GetRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index ff57e3bf9..34b8290d6 100644 --- a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -81,6 +81,12 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { headParams.SetAddress(prm.addr) headParams.SetRaw(prm.raw) + release, limitErr := s.opsLimiter.ReadRequest(ctx) + if limitErr != nil { + return HeadRes{}, limitErr + } + defer release() + var res meta.GetRes res, err = s.metaBase.Get(ctx, headParams) obj = res.Header() diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 26492cf01..7391adef2 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { } shardID := s.info.ID.String() - s.cfg.metricsWriter.SetShardID(shardID) + s.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } @@ -61,6 +61,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { if s.pilorama != nil { s.pilorama.SetParentID(s.info.ID.String()) } + s.opsLimiter.SetParentID(s.info.ID.String()) if len(idFromMetabase) == 0 && !modeDegraded { if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { diff --git a/pkg/local_object_storage/shard/inhume.go 
b/pkg/local_object_storage/shard/inhume.go index 9d5f66063..c0fd65f4b 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -81,6 +81,12 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return InhumeRes{}, err + } + defer release() + if s.hasWriteCache() { for i := range prm.target { _ = s.writeCache.Delete(ctx, prm.target[i]) diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 7bc5ead1d..af87981ca 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -106,6 +106,12 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + lst, err := s.metaBase.Containers(ctx) if err != nil { return res, fmt.Errorf("list stored containers: %w", err) @@ -145,6 +151,12 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo return ListContainersRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListContainersRes{}, err + } + defer release() + containers, err := s.metaBase.Containers(ctx) if err != nil { return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) @@ -173,6 +185,12 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List return ListWithCursorRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListWithCursorRes{}, err + } + defer release() + var metaPrm meta.ListPrm metaPrm.SetCount(prm.count) metaPrm.SetCursor(prm.cursor) @@ -202,9 +220,15 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai return ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + var metaPrm meta.IterateOverContainersPrm metaPrm.Handler = prm.Handler - err := s.metaBase.IterateOverContainers(ctx, metaPrm) + err = s.metaBase.IterateOverContainers(ctx, metaPrm) if err != nil { return fmt.Errorf("iterate over containers: %w", err) } @@ -227,11 +251,17 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv return ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + var metaPrm meta.IterateOverObjectsInContainerPrm metaPrm.ContainerID = prm.ContainerID metaPrm.ObjectType = prm.ObjectType metaPrm.Handler = prm.Handler - err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) + err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) if err != nil { return fmt.Errorf("iterate over objects: %w", err) } @@ -251,6 +281,12 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + var metaPrm meta.CountAliveObjectsInContainerPrm metaPrm.ObjectType = prm.ObjectType metaPrm.ContainerID = prm.ContainerID diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index 31ca16aa1..9c392fdac 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx 
context.Context, idCnr cid.ID, locker oid.ID, locked [] return ErrDegradedMode } - err := s.metaBase.Lock(ctx, idCnr, locker, locked) + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.Lock(ctx, idCnr, locker, locked) if err != nil { return fmt.Errorf("metabase lock: %w", err) } @@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() + var prm meta.IsLockedPrm prm.SetAddress(addr) @@ -86,5 +98,12 @@ func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error if m.NoMetabase() { return nil, ErrDegradedMode } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + return s.metaBase.GetLocks(ctx, addr) } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 5caf3641f..3878a65cd 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -28,9 +28,10 @@ func TestShard_Lock(t *testing.T) { var sh *Shard rootPath := t.TempDir() + l := logger.NewLoggerWrapper(zap.NewNop()) opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + WithLogger(l), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 3f23111af..f8cb00a31 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -67,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var res common.PutRes + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return PutRes{}, err + } + defer release() + // exist check are not performed there, these checks should be executed // ahead of `Put` by storage engine tryCache := s.hasWriteCache() && !m.NoMetabase() diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index 701268820..443689104 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -131,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { return obj, nil } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return RngRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 146e834cc..06fe9f511 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -79,7 +79,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 10eb51a28..20f1f2b6f 100644 --- 
a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -6,10 +6,13 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -18,37 +21,9 @@ import ( var ErrRebuildInProgress = errors.New("shard rebuild in progress") -type RebuildWorkerLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -type rebuildLimiter struct { - semaphore chan struct{} -} - -func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter { - return &rebuildLimiter{ - semaphore: make(chan struct{}, workersCount), - } -} - -func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error { - select { - case l.semaphore <- struct{}{}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (l *rebuildLimiter) ReleaseWorkSlot() { - <-l.semaphore -} - type rebuildTask struct { - limiter RebuildWorkerLimiter - fillPercent int + concurrencyLimiter common.RebuildLimiter + fillPercent int } type rebuilder struct { @@ -88,14 +63,14 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D if !ok { continue } - runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter) + runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter) } } }() } func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger, - fillPercent int, limiter RebuildWorkerLimiter, + fillPercent int, concLimiter common.RebuildLimiter, ) { select { case <-ctx.Done(): @@ -103,21 +78,22 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo default: } log.Info(ctx, logs.BlobstoreRebuildStarted) - if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) + if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) } } -func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int, +func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int, ) error { select { case <-ctx.Done(): return ctx.Err() case r.tasks <- rebuildTask{ - limiter: limiter, - fillPercent: fillPercent, + concurrencyLimiter: limiter, + fillPercent: fillPercent, }: return nil default: @@ -166,7 +142,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres } type RebuildPrm struct { - ConcurrencyLimiter RebuildWorkerLimiter + ConcurrencyLimiter common.ConcurrencyLimiter TargetFillPercent uint32 } @@ -188,5 +164,30 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error { return ErrDegradedMode } - return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent)) + limiter := 
&rebuildLimiter{ + concurrencyLimiter: p.ConcurrencyLimiter, + rateLimiter: s.opsLimiter, + } + return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent)) +} + +var _ common.RebuildLimiter = (*rebuildLimiter)(nil) + +type rebuildLimiter struct { + concurrencyLimiter common.ConcurrencyLimiter + rateLimiter qos.Limiter +} + +func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + return r.concurrencyLimiter.AcquireWorkSlot(ctx) +} + +func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.ReadRequest(ctx) + return common.ReleaseFunc(release), err +} + +func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.WriteRequest(ctx) + return common.ReleaseFunc(release), err } diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index c7c7e11c2..fbc751e26 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -60,6 +60,12 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + var selectPrm meta.SelectPrm selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 1eb7f14d0..d89b56266 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -7,6 +7,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -98,6 +99,8 @@ type cfg struct { reportErrorFunc func(ctx context.Context, selfID string, message string, err error) containerInfo container.InfoProvider + + opsLimiter qos.Limiter } func defaultCfg() *cfg { @@ -109,6 +112,7 @@ func defaultCfg() *cfg { zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, metricsWriter: noopMetrics{}, + opsLimiter: qos.NewNoopLimiter(), } } @@ -201,7 +205,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option { func WithLogger(l *logger.Logger) Option { return func(c *cfg) { c.log = l - c.gcCfg.log = l.WithTag(logger.TagGC) } } @@ -214,7 +218,7 @@ func WithWriteCache(use bool) Option { // hasWriteCache returns bool if write cache exists on shards. func (s *Shard) hasWriteCache() bool { - return s.cfg.useWriteCache + return s.useWriteCache } // NeedRefillMetabase returns true if metabase is needed to be refilled.
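Reviewer's note: the hunks above establish the pattern this patch threads through every shard operation — acquire a read or write slot from the shard's opsLimiter before touching the metabase or blobstor, and give it back via defer when the call returns. Below is a minimal, self-contained sketch of that contract; the Limiter/ReleaseFunc shapes are inferred from the call sites in this diff, and all names here are illustrative, not the actual frostfs-node API.

package main

import (
	"context"
	"fmt"
)

// ReleaseFunc gives the slot back; it mirrors the release() values
// returned by opsLimiter.ReadRequest/WriteRequest in the hunks above.
type ReleaseFunc func()

// Limiter is the assumed shape of qos.Limiter as used by the shard code.
type Limiter interface {
	ReadRequest(ctx context.Context) (ReleaseFunc, error)
	WriteRequest(ctx context.Context) (ReleaseFunc, error)
}

// noopLimiter plays the role of qos.NewNoopLimiter(): it admits every
// request, so shards configured without limits pay almost no overhead.
type noopLimiter struct{}

func (noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error)  { return func() {}, nil }
func (noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { return func() {}, nil }

// getObject shows the guard placement used in Shard.Get/Head/Select:
// fail fast if no slot can be acquired, otherwise hold the slot for
// the whole operation via defer.
func getObject(ctx context.Context, l Limiter) error {
	release, err := l.ReadRequest(ctx)
	if err != nil {
		return err // context cancelled or rate limit exceeded
	}
	defer release()
	// ... metabase lookup and blobstor read would happen here ...
	return nil
}

func main() {
	fmt.Println(getObject(context.Background(), noopLimiter{})) // <nil>
}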
@@ -368,16 +372,22 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { } } -func (s *Shard) fillInfo() { - s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() - s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() - s.cfg.info.Mode = s.GetMode() +func WithLimiter(l qos.Limiter) Option { + return func(c *cfg) { + c.opsLimiter = l + } +} - if s.cfg.useWriteCache { - s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo() +func (s *Shard) fillInfo() { + s.info.MetaBaseInfo = s.metaBase.DumpInfo() + s.info.BlobStorInfo = s.blobStor.DumpInfo() + s.info.Mode = s.GetMode() + + if s.useWriteCache { + s.info.WriteCacheInfo = s.writeCache.DumpInfo() } if s.pilorama != nil { - s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo() + s.info.PiloramaInfo = s.pilorama.DumpInfo() } } @@ -444,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) { s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic) s.setContainerObjectsCount(contID.EncodeToString(), user, count.User) } - s.cfg.metricsWriter.SetMode(s.info.Mode) + s.metricsWriter.SetMode(s.info.Mode) } // incObjectCounter increment both physical and logical object // counters. func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - s.cfg.metricsWriter.IncObjectCounter(physical) - s.cfg.metricsWriter.IncObjectCounter(logical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + s.metricsWriter.IncObjectCounter(physical) + s.metricsWriter.IncObjectCounter(logical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) if isUser { - s.cfg.metricsWriter.IncObjectCounter(user) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) + s.metricsWriter.IncObjectCounter(user) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { if v > 0 { - s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) + s.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { if v > 0 { - s.cfg.metricsWriter.SetObjectCounter(typ, v) + s.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { for cnrID, count := range byCnr { if count.Phy > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) } if count.Logic > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) } if count.User > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) } } } func (s *Shard) addToContainerSize(cnr string, size int64) { if size != 0 { - s.cfg.metricsWriter.AddToContainerSize(cnr, size) + s.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { if size != 0 { - s.cfg.metricsWriter.AddToPayloadSize(size) + s.metricsWriter.AddToPayloadSize(size) } } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index f9ee34488..84be71c4d 100644 --- 
a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -60,7 +60,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index 01a014cec..db361a8bd 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeMove(ctx, d, treeID, m) } @@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } @@ -103,6 +113,11 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } @@ -130,6 +145,11 @@ func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) } @@ -157,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } @@ -182,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Meta{}, 0, err + } + defer release() return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } @@ -207,11 +237,16 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) } // TreeSortedByFilename implements the pilorama.Forest interface. 
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), @@ -231,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID if s.info.Mode.NoMetabase() { return nil, last, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, last, err + } + defer release() return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) } @@ -256,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return pilorama.Move{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Move{}, err + } + defer release() return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) } @@ -280,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeDrop(ctx, cid, treeID) } @@ -303,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeList(ctx, cid) } @@ -326,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u if s.pilorama == nil { return 0, ErrPiloramaDisabled } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeHeight(ctx, cid, treeID) } @@ -350,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b if s.info.Mode.NoMetabase() { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() return s.pilorama.TreeExists(ctx, cid, treeID) } @@ -378,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) } @@ -402,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st if s.info.Mode.NoMetabase() { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) } @@ -423,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeListTrees(ctx, prm) } @@ -452,5 +532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin 
if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source) } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index f655e477a..9edb89df8 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -67,6 +67,12 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal) } @@ -124,6 +130,13 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { close(started) defer cleanup() + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) + return + } + defer release() + s.log.Info(ctx, logs.StartedWritecacheSealAsync) if err := s.writeCache.Seal(ctx, prm); err != nil { s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) @@ -138,5 +151,11 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { return nil } } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Seal(ctx, prm) } diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index b99d73d3a..ee709ea73 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -6,6 +6,7 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -61,6 +62,7 @@ func New(opts ...Option) Cache { maxCacheSize: defaultMaxCacheSize, metrics: DefaultMetrics(), flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize, + qosLimiter: qos.NewNoopLimiter(), }, } diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index d9e34ceab..893d27ba2 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -14,6 +15,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" @@ -35,6 +37,7 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } + ctx = tagging.ContextWithIOTag(ctx, 
qos.IOTagWritecache.String()) fl := newFlushLimiter(c.flushSizeLimit) c.wg.Add(1) go func() { @@ -64,7 +67,13 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { continue } - err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { + release, err := c.qosLimiter.ReadRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err)) + c.modeMtx.RUnlock() + continue + } + err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { if err := fl.acquire(oi.DataSize); err != nil { return err } @@ -79,11 +88,15 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { return ctx.Err() } }) + release() if err != nil { c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) } c.modeMtx.RUnlock() + + // counter changed by fstree + c.estimateCacheSize() case <-ctx.Done(): return } @@ -107,6 +120,12 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) { func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) { defer fl.release(objInfo.size) + release, err := c.qosLimiter.WriteRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err)) + return + } + defer release() res, err := c.fsTree.Get(ctx, common.GetPrm{ Address: objInfo.addr, }) diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go index ddc4101be..0e020b36e 100644 --- a/pkg/local_object_storage/writecache/limiter.go +++ b/pkg/local_object_storage/writecache/limiter.go @@ -3,6 +3,8 @@ package writecache import ( "errors" "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) var errLimiterClosed = errors.New("acquire failed: limiter closed") @@ -45,17 +47,11 @@ func (l *flushLimiter) release(size uint64) { l.cond.L.Lock() defer l.cond.L.Unlock() - if l.size >= size { - l.size -= size - } else { - panic("flushLimiter: invalid size") - } + assert.True(l.size >= size, "flushLimiter: invalid size") + l.size -= size - if l.count > 0 { - l.count-- - } else { - panic("flushLimiter: invalid count") - } + assert.True(l.count > 0, "flushLimiter: invalid count") + l.count-- l.cond.Broadcast() } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index f2957fe98..a4f98ad06 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -3,8 +3,8 @@ package writecache import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Option represents write-cache configuration option. @@ -38,12 +38,14 @@ type options struct { disableBackgroundFlush bool // flushSizeLimit is total size of flushing objects. flushSizeLimit uint64 + // qosLimiter used to limit flush RPS. + qosLimiter qos.Limiter } // WithLogger sets logger. 
func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = log.With(zap.String("component", "WriteCache")) + o.log = log } } @@ -136,3 +138,9 @@ func WithFlushSizeLimit(v uint64) Option { o.flushSizeLimit = v } } + +func WithQoSLimiter(l qos.Limiter) Option { + return func(o *options) { + o.qosLimiter = l + } +} diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index 7da5c4d3a..2fbf50913 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -2,6 +2,7 @@ package writecache import ( "context" + "fmt" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -59,7 +60,15 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro // putBig writes object to FSTree and pushes it to the flush workers queue. func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error { - if !c.hasEnoughSpaceFS() { + if prm.RawData == nil { // foolproof: RawData should be marshalled by shard. + data, err := prm.Object.Marshal() + if err != nil { + return fmt.Errorf("cannot marshal object: %w", err) + } + prm.RawData = data + } + size := uint64(len(prm.RawData)) + if !c.hasEnoughSpace(size) { return ErrOutOfSpace } diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go index 44caa2603..7a52d3672 100644 --- a/pkg/local_object_storage/writecache/state.go +++ b/pkg/local_object_storage/writecache/state.go @@ -7,10 +7,6 @@ func (c *cache) estimateCacheSize() (uint64, uint64) { return count, size } -func (c *cache) hasEnoughSpaceFS() bool { - return c.hasEnoughSpace(c.maxObjectSize) -} - func (c *cache) hasEnoughSpace(objectSize uint64) bool { count, size := c.estimateCacheSize() if c.maxCacheCount > 0 && count+1 > c.maxCacheCount { diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index 70b17eb8e..7ed511318 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -52,7 +52,7 @@ type Cache interface { // MainStorage is the interface of the underlying storage of Cache implementations. 
type MainStorage interface { - Compressor() *compression.Config + Compressor() *compression.Compressor Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error) Put(context.Context, common.PutPrm) (common.PutRes, error) } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 10ded5142..aab058d27 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -9,6 +9,7 @@ import ( "sync/atomic" "time" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" @@ -60,6 +61,9 @@ type Client struct { rpcActor *actor.Actor // neo-go RPC actor gasToken *nep17.Token // neo-go GAS token wrapper rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper + nnsHash util.Uint160 // NNS contract hash + + nnsReader *nnsClient.ContractReader // NNS contract wrapper acc *wallet.Account // neo account accAddr util.Uint160 // account's address @@ -94,27 +98,12 @@ type Client struct { type cache struct { m sync.RWMutex - nnsHash *util.Uint160 gKey *keys.PublicKey txHeights *lru.Cache[util.Uint256, uint32] metrics metrics.MorphCacheMetrics } -func (c *cache) nns() *util.Uint160 { - c.m.RLock() - defer c.m.RUnlock() - - return c.nnsHash -} - -func (c *cache) setNNSHash(nnsHash util.Uint160) { - c.m.Lock() - defer c.m.Unlock() - - c.nnsHash = &nnsHash -} - func (c *cache) groupKey() *keys.PublicKey { c.m.RLock() defer c.m.RUnlock() @@ -133,7 +122,6 @@ func (c *cache) invalidate() { c.m.Lock() defer c.m.Unlock() - c.nnsHash = nil c.gKey = nil c.txHeights.Purge() } @@ -163,20 +151,6 @@ func (e *notHaltStateError) Error() string { ) } -// implementation of error interface for FrostFS-specific errors. -type frostfsError struct { - err error -} - -func (e frostfsError) Error() string { - return fmt.Sprintf("frostfs error: %v", e.err) -} - -// wraps FrostFS-specific error into frostfsError. Arg must not be nil. -func wrapFrostFSError(err error) error { - return frostfsError{err} -} - // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. @@ -213,7 +187,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F // If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created. // The default batchSize is 100, the default limit from neo-go. 
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error { +func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error { start := time.Now() success := false defer func() { @@ -240,7 +214,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int if err != nil { return err } else if val.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) + return ¬HaltStateError{state: val.State, exception: val.FaultException} } arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err) @@ -262,10 +236,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int }() // Batch size for TraverseIterator() can restricted on the server-side. - traverseBatchSize := batchSize - if invoker.DefaultIteratorResultItems < traverseBatchSize { - traverseBatchSize = invoker.DefaultIteratorResultItems - } + traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems) for { items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize) if err != nil { @@ -307,7 +278,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) ( } if val.State != HaltState { - return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) + return nil, ¬HaltStateError{state: val.State, exception: val.FaultException} } success = true @@ -594,6 +565,7 @@ func (c *Client) setActor(act *actor.Actor) { c.rpcActor = act c.gasToken = nep17.New(act, gas.Hash) c.rolemgmt = rolemgmt.New(act) + c.nnsReader = nnsClient.NewReader(act, c.nnsHash) } func (c *Client) GetActor() *actor.Actor { diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index d061747bb..e4dcd0db7 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -145,6 +145,11 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er if cli.client == nil { return nil, ErrNoHealthyEndpoint } + cs, err := cli.client.GetContractStateByID(nnsContractID) + if err != nil { + return nil, fmt.Errorf("resolve nns hash: %w", err) + } + cli.nnsHash = cs.Hash cli.setActor(act) go cli.closeWaiter(ctx) diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index fcdb70e3f..3f6aed506 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -2,7 +2,6 @@ package netmap import ( "context" - "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -26,44 +25,24 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { - objectSize, err := c.readUInt64Config(ctx, MaxObjectSizeConfig) - if err != nil { - return 0, err - } - - return objectSize, nil + return c.readUInt64Config(ctx, MaxObjectSizeConfig) } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { - epochDuration, err := c.readUInt64Config(ctx, EpochDurationConfig) - if err != nil { - return 0, err - } - - return epochDuration, nil + return c.readUInt64Config(ctx, EpochDurationConfig) } // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. 
func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, ContainerFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, ContainerFeeConfig) } // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, ContainerAliasFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, ContainerAliasFeeConfig) } // HomomorphicHashDisabled returns global configuration value of homomorphic hashing @@ -77,23 +56,13 @@ func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, IrCandidateFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, IrCandidateFeeConfig) } // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, WithdrawFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, WithdrawFeeConfig) } // MaintenanceModeAllowed reads admission of "maintenance" state from the @@ -106,29 +75,27 @@ func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { } func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { - v, err := c.config(ctx, []byte(key), IntegerAssert) + v, err := c.config(ctx, []byte(key)) if err != nil { return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) } - // IntegerAssert is guaranteed to return int64 if the error is nil. - return uint64(v.(int64)), nil + bi, err := v.TryInteger() + if err != nil { + return 0, err + } + return bi.Uint64(), nil } // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { - v, err := c.config(ctx, []byte(key), BoolAssert) + v, err := c.config(ctx, []byte(key)) if err != nil { - if errors.Is(err, ErrConfigNotFound) { - return false, nil - } - return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } - // BoolAssert is guaranteed to return bool if the error is nil. - return v.(bool), nil + return v.TryBool() } // SetConfigPrm groups parameters of SetConfig operation. @@ -277,15 +244,11 @@ func bytesToBool(val []byte) bool { return false } -// ErrConfigNotFound is returned when the requested key was not found -// in the network config (returned value is `Null`). -var ErrConfigNotFound = errors.New("config value not found") - // config performs the test invoke of get config value // method of FrostFS Netmap contract. // -// Returns ErrConfigNotFound if config key is not found in the contract. +// Returns stackitem.Null if the config key is not found in the contract. 
-func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.Item) (any, error)) (any, error) { +func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) @@ -301,26 +264,7 @@ func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.I configMethod, ln) } - if _, ok := items[0].(stackitem.Null); ok { - return nil, ErrConfigNotFound - } - - return assert(items[0]) -} - -// IntegerAssert converts stack item to int64. -func IntegerAssert(item stackitem.Item) (any, error) { - return client.IntFromStackItem(item) -} - -// StringAssert converts stack item to string. -func StringAssert(item stackitem.Item) (any, error) { - return client.StringFromStackItem(item) -} - -// BoolAssert converts stack item to bool. -func BoolAssert(item stackitem.Item) (any, error) { - return client.BoolFromStackItem(item) + return items[0], nil } // iterateRecords iterates over all config records and passes them to f. diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index f292dccf1..bc00eb889 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -8,14 +8,12 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) const ( @@ -37,12 +35,8 @@ const ( NNSPolicyContractName = "policy.frostfs" ) -var ( - // ErrNNSRecordNotFound means that there is no such record in NNS contract. - ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") - - errEmptyResultStack = errors.New("returned result stack is empty") -) +// ErrNNSRecordNotFound means that there is no such record in NNS contract. +var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") // NNSAlphabetContractName returns contract name of the alphabet contract in NNS // based on alphabet index. @@ -61,97 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - nnsHash, err := c.NNSHash() - if err != nil { - return util.Uint160{}, err - } - - sh, err = nnsResolve(c.client, nnsHash, name) + sh, err = nnsResolve(c.nnsReader, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) } return sh, nil } -// NNSHash returns NNS contract hash. 
-func (c *Client) NNSHash() (util.Uint160, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint160{}, ErrConnectionLost - } - - success := false - startedAt := time.Now() - - defer func() { - c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt)) - }() - - nnsHash := c.cache.nns() - - if nnsHash == nil { - cs, err := c.client.GetContractStateByID(nnsContractID) - if err != nil { - return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err) - } - - c.cache.setNNSHash(cs.Hash) - nnsHash = &cs.Hash - } - success = true - return *nnsHash, nil -} - -func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { - found, err := exists(c, nnsHash, domain) +func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { + available, err := r.IsAvailable(domain) if err != nil { return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } - if !found { + if available { return nil, ErrNNSRecordNotFound } - result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - { - Type: smartcontract.IntegerType, - Value: big.NewInt(int64(nns.TXT)), - }, - }, nil) - if err != nil { - return nil, err - } - if result.State != vmstate.Halt.String() { - return nil, fmt.Errorf("invocation failed: %s", result.FaultException) - } - if len(result.Stack) == 0 { - return nil, errEmptyResultStack - } - return result.Stack[0], nil + return r.Resolve(domain, big.NewInt(int64(nns.TXT))) } -func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) { - res, err := nnsResolveItem(c, nnsHash, domain) +func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { + arr, err := nnsResolveItem(r, domain) if err != nil { return util.Uint160{}, err } - // Parse the result of resolving NNS record. - // It works with multiple formats (corresponding to multiple NNS versions). - // If array of hashes is provided, it returns only the first one. - if arr, ok := res.Value().([]stackitem.Item); ok { - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") - } - res = arr[0] + if len(arr) == 0 { + return util.Uint160{}, errors.New("NNS record is missing") } - bs, err := res.TryBytes() + bs, err := arr[0].TryBytes() if err != nil { return util.Uint160{}, fmt.Errorf("malformed response: %w", err) } @@ -171,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti return util.Uint160{}, errors.New("no valid hashes are found") } -func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) { - result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - }, nil) - if err != nil { - return false, err - } - - if len(result.Stack) == 0 { - return false, errEmptyResultStack - } - - res := result.Stack[0] - - available, err := res.TryBool() - if err != nil { - return false, fmt.Errorf("malformed response: %w", err) - } - - // not available means that it is taken - // and, therefore, exists - return !available, nil -} - // SetGroupSignerScope makes the default signer scope include all FrostFS contracts. // Should be called for side-chain client only. 
func (c *Client) SetGroupSignerScope() error { @@ -241,18 +147,12 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - nnsHash, err := c.NNSHash() + arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) if err != nil { return nil, err } - item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) - if err != nil { - return nil, err - } - - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) == 0 { + if len(arr) == 0 { return nil, errors.New("NNS record is missing") } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index dbd58a53a..448702613 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -38,8 +38,7 @@ type ( alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - notary util.Uint160 - proxy util.Uint160 + proxy util.Uint160 } notaryCfg struct { @@ -102,7 +101,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, alphabetSource: cfg.alphabetSource, - notary: notary.Hash, } c.notary = notaryCfg @@ -188,7 +186,7 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, - c.notary.notary, + notary.Hash, big.NewInt(int64(amount)), []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { @@ -463,7 +461,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { if r.State != vmstate.Halt.String() { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: r.FaultException} } t.ValidUntilBlock = until @@ -610,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) err := multisigAccount.ConvertMultisig(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("convert account to inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) } } else { // alphabet multisig redeem script is @@ -619,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB // inner ring multiaddress witness multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("make inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) } } diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go index f68d39beb..f7b6705a8 100644 --- a/pkg/morph/client/util.go +++ b/pkg/morph/client/util.go @@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) { func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error { return func(r *result.Invoke, t *transaction.Transaction) error { if r.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: 
r.FaultException} } t.SystemFee += add diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go index 5b9d2cbe0..87fcf84b8 100644 --- a/pkg/morph/client/waiter.go +++ b/pkg/morph/client/waiter.go @@ -47,5 +47,5 @@ func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) err if res.VMState.HasFlag(vmstate.Halt) { return nil } - return wrapFrostFSError(¬HaltStateError{state: res.VMState.String(), exception: res.FaultException}) + return ¬HaltStateError{state: res.VMState.String(), exception: res.FaultException} } diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index 40f5984a9..b11973646 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -199,8 +199,8 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { // neo-go API) // // this check prevents notary flow recursion - if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 || - bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version + if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 && + !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version return ErrTXAlreadyHandled } @@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu // the last one must be a placeholder for notary contract witness last := len(w) - 1 - if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981 - bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version + if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981 + !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version len(w[last].VerificationScript) != 0 { return errIncorrectNotaryPlaceholder } diff --git a/pkg/network/address.go b/pkg/network/address.go index cb83a813d..4643eef15 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -2,11 +2,11 @@ package network import ( "errors" - "fmt" "net" "net/url" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ -44,11 +44,9 @@ func (a Address) equal(addr Address) bool { // See also FromString. 
func (a Address) URIAddr() string { _, host, err := manet.DialArgs(a.ma) - if err != nil { - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - panic(fmt.Errorf("could not get host addr: %w", err)) - } + // the only correct way to construct Address is AddressFromString + // which makes this error appear unexpected + assert.NoError(err, "could not get host addr") if !a.IsTLSEnabled() { return host diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 1bcb83259..54c1e18fb 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -7,10 +7,12 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -62,12 +64,16 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpcOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), diff --git a/pkg/network/group.go b/pkg/network/group.go index 9843b14d4..0044fb2d4 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,6 +3,8 @@ package network import ( "errors" "fmt" + "iter" + "slices" "sort" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -67,9 +69,8 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // IterateAddresses must iterate over network addresses and pass each one - // to the handler until it returns true. - IterateAddresses(func(string) bool) + // Addresses must return an iterator over network addresses. + Addresses() iter.Seq[string] // NumberOfAddresses must return number of addresses in group. NumberOfAddresses() int @@ -130,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error { // iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f // until 1st parsing failure or f's error. func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) { - iter.IterateAddresses(func(s string) bool { + for s := range iter.Addresses() { var a Address err = a.FromString(s) if err != nil { - err = fmt.Errorf("could not parse address from string: %w", err) - return true + return fmt.Errorf("could not parse address from string: %w", err) } err = f(a) - - return err != nil - }) + if err != nil { + return err + } + } return } @@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) { // at least one common address. 
func (x AddressGroup) Intersects(x2 AddressGroup) bool { for i := range x { - for j := range x2 { - if x[i].equal(x2[j]) { - return true - } + if slices.ContainsFunc(x2, x[i].equal) { + return true } } diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go index 5b335fa52..d08264533 100644 --- a/pkg/network/group_test.go +++ b/pkg/network/group_test.go @@ -1,6 +1,8 @@ package network import ( + "iter" + "slices" "sort" "testing" @@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) { type testIterator []string -func (t testIterator) IterateAddresses(f func(string) bool) { - for i := range t { - f(t[i]) - } +func (t testIterator) Addresses() iter.Seq[string] { + return slices.Values(t) } func (t testIterator) NumberOfAddresses() int { diff --git a/pkg/network/validation.go b/pkg/network/validation.go index 92f650119..b5157f28f 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -2,6 +2,7 @@ package network import ( "errors" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -34,8 +35,8 @@ var ( // MultiAddressIterator. type NodeEndpointsIterator netmap.NodeInfo -func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) +func (x NodeEndpointsIterator) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() } func (x NodeEndpointsIterator) NumberOfAddresses() int { diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go index e64f9a8d1..1d485321c 100644 --- a/pkg/services/apemanager/errors/errors.go +++ b/pkg/services/apemanager/errors/errors.go @@ -9,3 +9,9 @@ func ErrAPEManagerAccessDenied(reason string) error { err.WriteReason(reason) return err } + +func ErrAPEManagerInvalidArgument(msg string) error { + err := new(apistatus.InvalidArgument) + err.SetMessage(msg) + return err +} diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index 58922fede..fc08fe569 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -81,7 +81,7 @@ var _ Server = (*Service)(nil) func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { var cidSDK cidSDK.ID if err := cidSDK.DecodeString(cid); err != nil { - return fmt.Errorf("invalid CID format: %w", err) + return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err)) } isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) if err != nil { @@ -101,7 +101,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw()) if err != nil { - return nil, err + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error()) } if len(chain.ID) == 0 { const randomIDLength = 10 @@ -122,7 +122,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) @@ -158,7 +158,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain } target = policy_engine.ContainerTarget(reqCID) default: - return nil, 
fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) @@ -193,7 +193,7 @@ func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRe } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target) @@ -227,11 +227,11 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK } sig := vh.GetBodySignature() if sig == nil { - return nil, errEmptyBodySignature + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error()) } key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) if err != nil { - return nil, fmt.Errorf("invalid signature key: %w", err) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err)) } return key, nil diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index c9b0b7363..eb6263320 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -20,7 +20,6 @@ import ( ) var ( - errInvalidTargetType = errors.New("bearer token defines non-container target override") errBearerExpired = errors.New("bearer token has expired") errBearerInvalidSignature = errors.New("bearer token has invalid signature") errBearerInvalidContainerID = errors.New("bearer token was created for another container") @@ -73,14 +72,22 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora // CheckAPE performs the common policy-engine check logic on a prepared request. func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { var cr policyengine.ChainRouter - if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { + if prm.BearerToken != nil { var err error if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil { return fmt.Errorf("bearer validation error: %w", err) } - cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) + if prm.BearerToken.Impersonate() { + cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) + } else { + override, isSet := prm.BearerToken.APEOverride() + if !isSet { + return errors.New("expected for override within bearer") + } + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) + if err != nil { + return fmt.Errorf("create chain router error: %w", err) + } } } else { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) @@ -126,19 +133,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe } // Check for ape overrides defined in the bearer token. 
- apeOverride := token.APEOverride() - if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { - return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) - } - - // Then check if container is either empty or equal to the container in the request. - var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !cntID.Equals(targetCnr) { - return errBearerInvalidContainerID + if apeOverride, isSet := token.APEOverride(); isSet { + switch apeOverride.Target.TargetType { + case ape.TargetTypeContainer: + var targetCnr cid.ID + err := targetCnr.DecodeString(apeOverride.Target.Name) + if err != nil { + return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) + } + if !cntID.Equals(targetCnr) { + return errBearerInvalidContainerID + } + default: + } } // Then check if container owner signed this token. @@ -150,8 +157,16 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe var usrSender user.ID user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner + // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's + // public key, but not the actual sender. + if !token.Impersonate() { + if !token.AssertUser(usrSender) { + return errBearerInvalidOwner + } + } else { + if !bearer.ResolveIssuer(*token).Equals(usrSender) { + return errBearerInvalidOwner + } } return nil diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index e1fbe3960..01bd825d7 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -655,10 +655,8 @@ func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err == nil { namespace = subject.Namespace - } else { - if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return "", fmt.Errorf("get subject error: %w", err) - } + } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { + return "", fmt.Errorf("get subject error: %w", err) } return namespace, nil } diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go index c2a4f88a6..0cfca71c1 100644 --- a/pkg/services/control/ir/server/server.go +++ b/pkg/services/control/ir/server/server.go @@ -35,8 +35,7 @@ func panicOnPrmValue(n string, v any) { // the parameterized private key. func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server { // verify required parameters - switch { - case prm.healthChecker == nil: + if prm.healthChecker == nil { panicOnPrmValue("health checker", prm.healthChecker) } diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index bbf2cf0cc..0c4236d0e 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -32,6 +32,7 @@ const ( rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides" rpcDetachShards = "DetachShards" rpcStartShardRebuild = "StartShardRebuild" + rpcListShardsForObject = "ListShardsForObject" ) // HealthCheck executes ControlService.HealthCheck RPC. @@ -364,3 +365,22 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts . 
return wResp.message, nil } + +// ListShardsForObject executes ControlService.ListShardsForObject RPC. +func ListShardsForObject( + cli *client.Client, + req *ListShardsForObjectRequest, + opts ...client.CallOption, +) (*ListShardsForObjectResponse, error) { + wResp := newResponseWrapper[ListShardsForObjectResponse]() + + wReq := &requestWrapper{ + m: req, + } + err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...) + if err != nil { + return nil, err + } + + return wResp.message, nil +} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index 7469ea74e..f3ba9015e 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -220,7 +220,7 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest TreeId: treeID, Operation: &tree.LogMove{ ParentId: op.Parent, - Meta: op.Meta.Bytes(), + Meta: op.Bytes(), ChildId: op.Child, }, }, diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go new file mode 100644 index 000000000..39565ed50 --- /dev/null +++ b/pkg/services/control/server/list_shards_for_object.go @@ -0,0 +1,65 @@ +package control + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) { + err := s.isValidRequest(req) + if err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + var obj oid.ID + err = obj.DecodeString(req.GetBody().GetObjectId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + var cnr cid.ID + err = cnr.DecodeString(req.GetBody().GetContainerId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + resp := new(control.ListShardsForObjectResponse) + body := new(control.ListShardsForObjectResponse_Body) + resp.SetBody(body) + + var objAddr oid.Address + objAddr.SetContainer(cnr) + objAddr.SetObject(obj) + info, err := s.s.ListShardsForObject(ctx, objAddr) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if len(info) == 0 { + return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject) + } + + body.SetShard_ID(shardInfoToProto(info)) + + // Sign the response + if err := ctrlmessage.Sign(s.key, resp); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return resp, nil +} + +func shardInfoToProto(infos []shard.Info) [][]byte { + shardInfos := make([][]byte, 0, len(infos)) + for _, info := range infos { + shardInfos = append(shardInfos, *info.ID) + } + + return shardInfos +} diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 97ecf9a8c..4c539acfc 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -89,6 +89,9 @@ service ControlService { // StartShardRebuild starts shard 
rebuild process. rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse); + + // ListShardsForObject returns shard info where object is stored. + rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse); } // Health check request. @@ -729,3 +732,23 @@ message StartShardRebuildResponse { Signature signature = 2; } + +message ListShardsForObjectRequest { + message Body { + string object_id = 1; + string container_id = 2; + } + + Body body = 1; + Signature signature = 2; +} + +message ListShardsForObjectResponse { + message Body { + // List of the node's shards storing object. + repeated bytes shard_ID = 1; + } + + Body body = 1; + Signature signature = 2; +} diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index 0b4e3cf32..44849d591 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -17303,3 +17303,727 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { in.Consumed() } } + +type ListShardsForObjectRequest_Body struct { + ObjectId string `json:"objectId"` + ContainerId string `json:"containerId"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.StringSize(1, x.ObjectId) + size += proto.StringSize(2, x.ContainerId) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ObjectId) != 0 { + mm.AppendString(1, x.ObjectId) + } + if len(x.ContainerId) != 0 { + mm.AppendString(2, x.ContainerId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body") + } + switch fc.FieldNum { + case 1: // ObjectId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ObjectId") + } + x.ObjectId = data + case 2: // ContainerId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + } + } + return nil +} +func (x *ListShardsForObjectRequest_Body) GetObjectId() string { + if x != nil { + return x.ObjectId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) { + x.ObjectId = v +} +func (x *ListShardsForObjectRequest_Body) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) { + x.ContainerId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"objectId\":" + out.RawString(prefix) + out.String(x.ObjectId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + out.String(x.ContainerId) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "objectId": + { + var f string + f = in.String() + x.ObjectId = f + } + case "containerId": + { + var f string + f = in.String() + x.ContainerId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectRequest struct { + Body *ListShardsForObjectRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
+// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectRequest) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) { + x.Body = v +} +func (x *ListShardsForObjectRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectRequest_Body + f = new(ListShardsForObjectRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse_Body struct { + Shard_ID [][]byte `json:"shardID"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectResponse_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.RepeatedBytesSize(1, x.Shard_ID) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + } + } + return nil +} +func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse struct { + Body *ListShardsForObjectResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectResponse) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectResponse) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) { + x.Body = v +} +func (x *ListShardsForObjectResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectResponse_Body + f = new(ListShardsForObjectResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index 987e08c59..045662ccf 100644 --- a/pkg/services/control/service_grpc.pb.go +++ b/pkg/services/control/service_grpc.pb.go @@ -41,6 +41,7 @@ const ( ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache" ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards" ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild" + ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject" ) // ControlServiceClient is the client API for ControlService service. @@ -95,6 +96,8 @@ type ControlServiceClient interface { DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) } type controlServiceClient struct { @@ -303,6 +306,15 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS return out, nil } +func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) { + out := new(ListShardsForObjectResponse) + err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ControlServiceServer is the server API for ControlService service. // All implementations should embed UnimplementedControlServiceServer // for forward compatibility @@ -355,6 +367,8 @@ type ControlServiceServer interface { DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) } // UnimplementedControlServiceServer should be embedded to have forward compatible implementations. 
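Taken together, the wrapper added in pkg/services/control/rpc.go and the generated client code above are enough to query the new RPC. A minimal client-side sketch under stated assumptions: `rawclient` is the same SDK rpc client package multi.go imports, the connection is already established, and request signing — which the server demands before anything else — is deliberately left out:

package controlclient

import (
	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
	rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
)

// listShardsForObject builds a ListShardsForObject request from the given
// container/object ID strings and returns the raw shard IDs from the reply.
// NOTE: a real caller must sign the request first (the server answers
// PermissionDenied otherwise) and should expect NotFound when no shard
// stores the object.
func listShardsForObject(cli *rawclient.Client, cnrID, objID string) ([][]byte, error) {
	body := new(control.ListShardsForObjectRequest_Body)
	body.SetContainerId(cnrID)
	body.SetObjectId(objID)

	req := new(control.ListShardsForObjectRequest)
	req.SetBody(body)

	resp, err := control.ListShardsForObject(cli, req)
	if err != nil {
		return nil, err
	}
	return resp.GetBody().GetShard_ID(), nil
}

Returning the raw [][]byte mirrors the repeated bytes shard_ID field from the proto definition; how callers render those IDs is up to them.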
@@ -427,6 +441,9 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented") } +func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented") +} // UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ControlServiceServer will @@ -835,6 +852,24 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListShardsForObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServiceServer).ListShardsForObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControlService_ListShardsForObject_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + // ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -930,6 +965,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{ MethodName: "StartShardRebuild", Handler: _ControlService_StartShardRebuild_Handler, }, + { + MethodName: "ListShardsForObject", + Handler: _ControlService_ListShardsForObject_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/services/control/service.proto", diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 44101a153..1b92fdaad 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" @@ -46,10 +47,12 @@ type NetworkInfo interface { } func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server { - if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil { - // this should never happen, otherwise it programmers bug - panic("can't create netmap execution service") - } + // this should never happen, otherwise it's a programmer's bug + msg := "BUG: can't create netmap execution service" + assert.False(s == nil, msg, "node state is nil") + assert.False(netInfo == nil, msg, "network info is nil") + assert.False(respSvc == nil, msg, "response service is nil") + assert.True(version.IsValid(v), msg, "invalid version") res := &executorSvc{ state: s, diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go deleted file mode 100644 index 
94e015abe..000000000 --- a/pkg/services/object/acl/eacl/v2/eacl_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "context" - "crypto/ecdsa" - "errors" - "testing" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type testLocalStorage struct { - t *testing.T - - expAddr oid.Address - - obj *objectSDK.Object - - err error -} - -func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - require.True(s.t, addr.Container().Equals(s.expAddr.Container())) - require.True(s.t, addr.Object().Equals(s.expAddr.Object())) - - return s.obj, s.err -} - -func testXHeaders(strs ...string) []session.XHeader { - res := make([]session.XHeader, len(strs)/2) - - for i := 0; i < len(strs); i += 2 { - res[i/2].SetKey(strs[i]) - res[i/2].SetValue(strs[i+1]) - } - - return res -} - -func TestHeadRequest(t *testing.T) { - req := new(objectV2.HeadRequest) - - meta := new(session.RequestMetaHeader) - req.SetMetaHeader(meta) - - body := new(objectV2.HeadRequestBody) - req.SetBody(body) - - addr := oidtest.Address() - - var addrV2 refs.Address - addr.WriteToV2(&addrV2) - - body.SetAddress(&addrV2) - - xKey := "x-key" - xVal := "x-val" - xHdrs := testXHeaders( - xKey, xVal, - ) - - meta.SetXHeaders(xHdrs) - - obj := objectSDK.New() - - attrKey := "attr_key" - attrVal := "attr_val" - var attr objectSDK.Attribute - attr.SetKey(attrKey) - attr.SetValue(attrVal) - obj.SetAttributes(attr) - - table := new(eaclSDK.Table) - - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - senderKey := priv.PublicKey() - - r := eaclSDK.NewRecord() - r.SetOperation(eaclSDK.OperationHead) - r.SetAction(eaclSDK.ActionDeny) - r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal) - r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal) - eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table.AddRecord(r) - - lStorage := &testLocalStorage{ - t: t, - expAddr: addr, - obj: obj, - } - - id := addr.Object() - - newSource := func(t *testing.T) eaclSDK.TypedHeaderSource { - hdrSrc, err := NewMessageHeaderSource( - lStorage, - NewRequestXHeaderSource(req), - addr.Container(), - WithOID(&id)) - require.NoError(t, err) - return hdrSrc - } - - cnr := addr.Container() - - unit := new(eaclSDK.ValidationUnit). - WithContainerID(&cnr). - WithOperation(eaclSDK.OperationHead). - WithSenderKey(senderKey.Bytes()). 
- WithEACLTable(table) - - validator := eaclSDK.NewValidator() - - checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(nil) - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(xHdrs) - - obj.SetAttributes() - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - lStorage.err = errors.New("any error") - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - r.SetAction(eaclSDK.ActionAllow) - - rID := eaclSDK.NewRecord() - rID.SetOperation(eaclSDK.OperationHead) - rID.SetAction(eaclSDK.ActionDeny) - rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object()) - eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table = eaclSDK.NewTable() - table.AddRecord(r) - table.AddRecord(rID) - - unit.WithEACLTable(table) - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) -} - -func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.True(t, fromRule) - require.Equal(t, expected, actual) -} - -func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.False(t, fromRule) - require.Equal(t, eaclSDK.ActionAllow, actual) -} diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go deleted file mode 100644 index ecb793df8..000000000 --- a/pkg/services/object/acl/eacl/v2/headers.go +++ /dev/null @@ -1,246 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type Option func(*cfg) - -type cfg struct { - storage ObjectStorage - - msg XHeaderSource - - cnr cid.ID - obj *oid.ID -} - -type ObjectStorage interface { - Head(context.Context, oid.Address) (*objectSDK.Object, error) -} - -type Request interface { - GetMetaHeader() *session.RequestMetaHeader -} - -type Response interface { - GetMetaHeader() *session.ResponseMetaHeader -} - -type headerSource struct { - requestHeaders []eaclSDK.Header - objectHeaders []eaclSDK.Header - - incompleteObjectHeaders bool -} - -func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) { - cfg := &cfg{ - storage: os, - cnr: cnrID, - msg: xhs, - } - - for i := range opts { - opts[i](cfg) - } - - if cfg.msg == nil { - return nil, errors.New("message is not provided") - } - - var res headerSource - - err := cfg.readObjectHeaders(&res) - if err != nil { - return nil, err - } - - res.requestHeaders = cfg.msg.GetXHeaders() - - return res, nil -} - -func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) { - switch typ { - default: - return nil, true - case eaclSDK.HeaderFromRequest: - return h.requestHeaders, true - case eaclSDK.HeaderFromObject: - return h.objectHeaders, 
!h.incompleteObjectHeaders - } -} - -type xHeader session.XHeader - -func (x xHeader) Key() string { - return (*session.XHeader)(&x).GetKey() -} - -func (x xHeader) Value() string { - return (*session.XHeader)(&x).GetValue() -} - -var errMissingOID = errors.New("object ID is missing") - -func (h *cfg) readObjectHeaders(dst *headerSource) error { - switch m := h.msg.(type) { - default: - panic(fmt.Sprintf("unexpected message type %T", h.msg)) - case requestXHeaderSource: - return h.readObjectHeadersFromRequestXHeaderSource(m, dst) - case responseXHeaderSource: - return h.readObjectHeadersResponseXHeaderSource(m, dst) - } -} - -func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error { - switch req := m.req.(type) { - case - *objectV2.GetRequest, - *objectV2.HeadRequest: - if h.obj == nil { - return errMissingOID - } - - objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objHeaders - dst.incompleteObjectHeaders = !completed - case - *objectV2.GetRangeRequest, - *objectV2.GetRangeHashRequest, - *objectV2.DeleteRequest: - if h.obj == nil { - return errMissingOID - } - - dst.objectHeaders = addressHeaders(h.cnr, h.obj) - case *objectV2.PutRequest: - if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.PutSingleRequest: - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj) - case *objectV2.SearchRequest: - cnrV2 := req.GetBody().GetContainerID() - var cnr cid.ID - - if cnrV2 != nil { - if err := cnr.ReadFromV2(*cnrV2); err != nil { - return fmt.Errorf("can't parse container ID: %w", err) - } - } - - dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)} - } - return nil -} - -func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error { - switch resp := m.resp.(type) { - default: - objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objectHeaders - dst.incompleteObjectHeaders = !completed - case *objectV2.GetResponse: - if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.HeadResponse: - oV2 := new(objectV2.Object) - - var hdr *objectV2.Header - - switch v := resp.GetBody().GetHeaderPart().(type) { - case *objectV2.ShortHeader: - hdr = new(objectV2.Header) - - var idV2 refsV2.ContainerID - h.cnr.WriteToV2(&idV2) - - hdr.SetContainerID(&idV2) - hdr.SetVersion(v.GetVersion()) - hdr.SetCreationEpoch(v.GetCreationEpoch()) - hdr.SetOwnerID(v.GetOwnerID()) - hdr.SetObjectType(v.GetObjectType()) - hdr.SetPayloadLength(v.GetPayloadLength()) - case *objectV2.HeaderWithSignature: - hdr = v.GetHeader() - } - - oV2.SetHeader(hdr) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - return nil -} - -func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) { - if idObj != nil { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*idObj) - - obj, err := h.storage.Head(context.TODO(), addr) - if err == nil { - return headersFromObject(obj, cnr, idObj), true - } - } - - return addressHeaders(cnr, 
idObj), false -} - -func cidHeader(idCnr cid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectContainerID, - v: idCnr.EncodeToString(), - } -} - -func oidHeader(obj oid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectID, - v: obj.EncodeToString(), - } -} - -func ownerIDHeader(ownerID user.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectOwnerID, - v: ownerID.EncodeToString(), - } -} - -func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - hh := make([]eaclSDK.Header, 0, 2) - hh = append(hh, cidHeader(cnr)) - - if oid != nil { - hh = append(hh, oidHeader(*oid)) - } - - return hh -} diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go deleted file mode 100644 index 92570a3c5..000000000 --- a/pkg/services/object/acl/eacl/v2/object.go +++ /dev/null @@ -1,92 +0,0 @@ -package v2 - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type sysObjHdr struct { - k, v string -} - -func (s sysObjHdr) Key() string { - return s.k -} - -func (s sysObjHdr) Value() string { - return s.v -} - -func u64Value(v uint64) string { - return strconv.FormatUint(v, 10) -} - -func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - var count int - for obj := obj; obj != nil; obj = obj.Parent() { - count += 9 + len(obj.Attributes()) - } - - res := make([]eaclSDK.Header, 0, count) - for ; obj != nil; obj = obj.Parent() { - res = append(res, - cidHeader(cnr), - // creation epoch - sysObjHdr{ - k: acl.FilterObjectCreationEpoch, - v: u64Value(obj.CreationEpoch()), - }, - // payload size - sysObjHdr{ - k: acl.FilterObjectPayloadLength, - v: u64Value(obj.PayloadSize()), - }, - // object version - sysObjHdr{ - k: acl.FilterObjectVersion, - v: obj.Version().String(), - }, - // object type - sysObjHdr{ - k: acl.FilterObjectType, - v: obj.Type().String(), - }, - ) - - if oid != nil { - res = append(res, oidHeader(*oid)) - } - - if idOwner := obj.OwnerID(); !idOwner.IsEmpty() { - res = append(res, ownerIDHeader(idOwner)) - } - - cs, ok := obj.PayloadChecksum() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectPayloadHash, - v: cs.String(), - }) - } - - cs, ok = obj.PayloadHomomorphicHash() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectHomomorphicHash, - v: cs.String(), - }) - } - - attrs := obj.Attributes() - for i := range attrs { - res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface - } - } - - return res -} diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go deleted file mode 100644 index d91a21c75..000000000 --- a/pkg/services/object/acl/eacl/v2/opts.go +++ /dev/null @@ -1,11 +0,0 @@ -package v2 - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func WithOID(v *oid.ID) Option { - return func(c *cfg) { - c.obj = v - } -} diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go deleted file mode 100644 index ce380c117..000000000 --- a/pkg/services/object/acl/eacl/v2/xheader.go +++ /dev/null @@ -1,69 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - eaclSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" -) - -type XHeaderSource interface { - GetXHeaders() []eaclSDK.Header -} - -type requestXHeaderSource struct { - req Request -} - -func NewRequestXHeaderSource(req Request) XHeaderSource { - return requestXHeaderSource{req: req} -} - -type responseXHeaderSource struct { - resp Response - - req Request -} - -func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource { - return responseXHeaderSource{resp: resp, req: req} -} - -func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - ln += len(meta.GetXHeaders()) - } - - res := make([]eaclSDK.Header, 0, ln) - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - for i := range x { - res = append(res, (xHeader)(x[i])) - } - } - - return res -} - -func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - xHdrs := make([][]session.XHeader, 0) - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - - ln += len(x) - - xHdrs = append(xHdrs, x) - } - - res := make([]eaclSDK.Header, 0, ln) - - for i := range xHdrs { - for j := range xHdrs[i] { - res = append(res, xHeader(xHdrs[i][j])) - } - } - - return res -} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go deleted file mode 100644 index cd2de174a..000000000 --- a/pkg/services/object/acl/v2/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -package v2 - -import ( - "fmt" -) - -const invalidRequestMessage = "malformed request" - -func malformedRequestError(reason string) error { - return fmt.Errorf("%s: %s", invalidRequestMessage, reason) -} - -var ( - errEmptyBody = malformedRequestError("empty body") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go deleted file mode 100644 index 15fcce884..000000000 --- a/pkg/services/object/acl/v2/opts.go +++ /dev/null @@ -1,12 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// WithLogger returns option to set logger. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go deleted file mode 100644 index 8bd34ccb3..000000000 --- a/pkg/services/object/acl/v2/request.go +++ /dev/null @@ -1,152 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "fmt" - - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// RequestInfo groups parsed version-independent (from SDK library) -// request information and raw API request. 
-type RequestInfo struct { - basicACL acl.Basic - requestRole acl.Role - operation acl.Op // put, get, head, etc. - cnrOwner user.ID // container owner - - // cnrNamespace defined to which namespace a container is belonged. - cnrNamespace string - - idCnr cid.ID - - // optional for some request - // e.g. Put, Search - obj *oid.ID - - senderKey []byte - - bearer *bearer.Token // bearer token of request - - srcRequest any -} - -func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) { - r.basicACL = basicACL -} - -func (r *RequestInfo) SetRequestRole(requestRole acl.Role) { - r.requestRole = requestRole -} - -func (r *RequestInfo) SetSenderKey(senderKey []byte) { - r.senderKey = senderKey -} - -// Request returns raw API request. -func (r RequestInfo) Request() any { - return r.srcRequest -} - -// ContainerOwner returns owner if the container. -func (r RequestInfo) ContainerOwner() user.ID { - return r.cnrOwner -} - -func (r RequestInfo) ContainerNamespace() string { - return r.cnrNamespace -} - -// ObjectID return object ID. -func (r RequestInfo) ObjectID() *oid.ID { - return r.obj -} - -// ContainerID return container ID. -func (r RequestInfo) ContainerID() cid.ID { - return r.idCnr -} - -// CleanBearer forces cleaning bearer token information. -func (r *RequestInfo) CleanBearer() { - r.bearer = nil -} - -// Bearer returns bearer token of the request. -func (r RequestInfo) Bearer() *bearer.Token { - return r.bearer -} - -// BasicACL returns basic ACL of the container. -func (r RequestInfo) BasicACL() acl.Basic { - return r.basicACL -} - -// SenderKey returns public key of the request's sender. -func (r RequestInfo) SenderKey() []byte { - return r.senderKey -} - -// Operation returns request's operation. -func (r RequestInfo) Operation() acl.Op { - return r.operation -} - -// RequestRole returns request sender's role. -func (r RequestInfo) RequestRole() acl.Role { - return r.requestRole -} - -// MetaWithToken groups session and bearer tokens, -// verification header and raw API request. -type MetaWithToken struct { - vheader *sessionV2.RequestVerificationHeader - token *sessionSDK.Object - bearer *bearer.Token - src any -} - -// RequestOwner returns ownerID and its public key -// according to internal meta information. 
-func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { - if r.vheader == nil { - return nil, nil, errEmptyVerificationHeader - } - - if r.bearer != nil && r.bearer.Impersonate() { - return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) - } - - // if session token is presented, use it as truth source - if r.token != nil { - // verify signature of session token - return ownerFromToken(r.token) - } - - // otherwise get original body signature - bodySignature := originalBodySignature(r.vheader) - if bodySignature == nil { - return nil, nil, errEmptyBodySig - } - - return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) -} - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go deleted file mode 100644 index 86daec6cc..000000000 --- a/pkg/services/object/acl/v2/service.go +++ /dev/null @@ -1,779 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.uber.org/zap" -) - -// Service checks basic ACL rules. -type Service struct { - *cfg - - c objectCore.SenderClassifier -} - -type putStreamBasicChecker struct { - source *Service - next object.PutObjectStream -} - -type patchStreamBasicChecker struct { - source *Service - next object.PatchObjectStream - nonFirstSend bool -} - -// Option represents Service constructor option. -type Option func(*cfg) - -type cfg struct { - log *logger.Logger - - containers container.Source - - irFetcher InnerRingFetcher - - nm netmap.Source - - next object.ServiceServer -} - -// New is a constructor for object ACL checking service. -func New(next object.ServiceServer, - nm netmap.Source, - irf InnerRingFetcher, - cs container.Source, - opts ...Option, -) Service { - cfg := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - next: next, - nm: nm, - irFetcher: irf, - containers: cs, - } - - for i := range opts { - opts[i](cfg) - } - - return Service{ - cfg: cfg, - c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log), - } -} - -// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. 
-type wrappedGetObjectStream struct { - object.GetObjectStream - - requestInfo RequestInfo -} - -func (w *wrappedGetObjectStream) Context() context.Context { - return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream { - return &wrappedGetObjectStream{ - GetObjectStream: getObjectStream, - requestInfo: reqInfo, - } -} - -// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedRangeStream struct { - object.GetObjectRangeStream - - requestInfo RequestInfo -} - -func (w *wrappedRangeStream) Context() context.Context { - return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream { - return &wrappedRangeStream{ - GetObjectRangeStream: rangeStream, - requestInfo: reqInfo, - } -} - -// wrappedSearchStream propagates RequestContext into SearchStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedSearchStream struct { - object.SearchStream - - requestInfo RequestInfo -} - -func (w *wrappedSearchStream) Context() context.Context { - return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream { - return &wrappedSearchStream{ - SearchStream: searchStream, - requestInfo: reqInfo, - } -} - -// Get implements ServiceServer interface, makes ACL checks and calls -// next Get method in the ServiceServer pipeline. 
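All three wrappers above use the same trick: override Context() so that values computed once during the ACL stage travel with the stream into the next handler. A toy, self-contained sketch of the pattern with hypothetical names (ctxKey, requestData, stream are illustrative, not part of the patch); the Get handler that the comment above introduces follows the sketch.

package main

import (
	"context"
	"fmt"
)

type ctxKey struct{}

type requestData struct{ Namespace string }

// stream stands in for the object service streams wrapped above.
type stream struct{ ctx context.Context }

func (s stream) Context() context.Context { return s.ctx }

type wrapped struct {
	stream
	data *requestData
}

// Context attaches the precomputed request data to the inner stream's context.
func (w wrapped) Context() context.Context {
	return context.WithValue(w.stream.Context(), ctxKey{}, w.data)
}

func main() {
	w := wrapped{stream{context.Background()}, &requestData{Namespace: "team1"}}

	// A downstream handler reads the value back with a type assertion.
	if d, ok := w.Context().Value(ctxKey{}).(*requestData); ok {
		fmt.Println(d.Namespace) // team1
	}
}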
-func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet) - if err != nil { - return err - } - - reqInfo.obj = obj - - return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) -} - -func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) { - streamer, err := b.next.Put(ctx) - - return putStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) { - streamer, err := b.next.Patch(ctx) - - return &patchStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Head( - ctx context.Context, - request *objectV2.HeadRequest, -) (*objectV2.HeadResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.Head(requestContext(ctx, reqInfo), request) -} - -func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error { - id, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, id, nil) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch) - if err != nil { - return err - } - - return b.next.Search(request, newWrappedSearchStream(stream, reqInfo)) -} - -func (b Service) Delete( - ctx context.Context, - request *objectV2.DeleteRequest, -) (*objectV2.DeleteResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err 
!= nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.Delete(requestContext(ctx, reqInfo), request) -} - -func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange) - if err != nil { - return err - } - - reqInfo.obj = obj - - return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo)) -} - -func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { - return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{ - Namespace: reqInfo.ContainerNamespace(), - ContainerOwner: reqInfo.ContainerOwner(), - SenderKey: reqInfo.SenderKey(), - Role: reqInfo.RequestRole(), - BearerToken: reqInfo.Bearer(), - }) -} - -func (b Service) GetRangeHash( - ctx context.Context, - request *objectV2.GetRangeHashRequest, -) (*objectV2.GetRangeHashResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) -} - -func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid object owner: %w", err) - } - - obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID()) - if err != nil { - return nil, err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return nil, err - } - - bTok, err := 
originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.PutSingle(requestContext(ctx, reqInfo), request) -} - -func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - part := body.GetObjectPart() - if part, ok := part.(*objectV2.PutObjectPartInit); ok { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - idV2 := part.GetHeader().GetOwnerID() - if idV2 == nil { - return errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return fmt.Errorf("invalid object owner: %w", err) - } - - objV2 := part.GetObjectID() - var obj *oid.ID - - if objV2 != nil { - obj = new(oid.ID) - - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) - if err != nil { - return err - } - - reqInfo.obj = obj - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { - var sTok *sessionSDK.Object - - if tokV2 != nil { - sTok = new(sessionSDK.Object) - - err := sTok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { - // if session relates to object's removal, we don't check - // relation of the tombstone to the session here since user - // can't predict tomb's ID. 
- err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} - -func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - if !p.nonFirstSend { - p.nonFirstSend = true - - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - objV2 := request.GetBody().GetAddress().GetObjectID() - if objV2 == nil { - return errors.New("missing oid") - } - obj := new(oid.ID) - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr) - if err != nil { - return err - } - - reqInfo.obj = obj - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { - cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch(ctx) - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - - if !assertVerb(*req.token, op) { - return info, errInvalidVerb - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.operation = op - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} - -// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert. 
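One detail of findRequestInfo above worth spelling out: the namespace is derived from the container's domain zone, where a zone of the form "<name>.ns" yields namespace "<name>" and any other zone falls back to the empty root namespace. A standalone sketch of that rule (the sample zone values are illustrative assumptions); the findRequestInfoWithoutACLOperationAssert variant continues right after this sketch.

package main

import (
	"fmt"
	"strings"
)

// namespaceFromZone mirrors the strings.CutSuffix logic in findRequestInfo.
func namespaceFromZone(zone string) string {
	if ns, ok := strings.CutSuffix(zone, ".ns"); ok {
		return ns
	}
	return "" // root namespace
}

func main() {
	fmt.Println(namespaceFromZone("team1.ns"))  // "team1"
	fmt.Println(namespaceFromZone("container")) // "" (root namespace)
}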
-func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { - cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch(ctx) - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go deleted file mode 100644 index 3cf10eb56..000000000 --- a/pkg/services/object/acl/v2/types.go +++ /dev/null @@ -1,11 +0,0 @@ -package v2 - -import "context" - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. - InnerRingKeys(ctx context.Context) ([][]byte, error) -} diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go deleted file mode 100644 index 4b19cecfe..000000000 --- a/pkg/services/object/acl/v2/util_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" - aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - "github.com/stretchr/testify/require" -) - -func TestOriginalTokens(t *testing.T) { - sToken := sessiontest.ObjectSigned() - bToken := bearertest.Token() - - pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, bToken.Sign(*pk)) - - var bTokenV2 acl.BearerToken - bToken.WriteToV2(&bTokenV2) - // This line is needed because SDK uses some custom format for - // reserved filters, so `cid.ID` is not converted to string immediately. 
- require.NoError(t, bToken.ReadFromV2(bTokenV2)) - - var sTokenV2 session.Token - sToken.WriteToV2(&sTokenV2) - - for i := range 10 { - metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) - res, err := originalSessionToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, sToken, res, i) - - bTok, err := originalBearerToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, &bToken, bTok, i) - } -} - -func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader { - metaHeader := new(session.RequestMetaHeader) - metaHeader.SetBearerToken(b) - metaHeader.SetSessionToken(s) - - for range depth { - link := metaHeader - metaHeader = new(session.RequestMetaHeader) - metaHeader.SetOrigin(link) - } - - return metaHeader -} - -func TestIsVerbCompatible(t *testing.T) { - // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28 - table := map[aclsdk.Op][]sessionSDK.ObjectVerb{ - aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet}, - aclsdk.OpObjectHead: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - }, - aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - } - - var tok sessionSDK.Object - - for op, list := range table { - for _, verb := range verbs { - var contains bool - for _, v := range list { - if v == verb { - contains = true - break - } - } - - tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index ee46a6fe4..b96757def 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -76,9 +76,10 @@ var errMissingOID = errors.New("object ID is not set") // CheckAPE prepares an APE-request and checks if it is permitted by policies. 
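Before the CheckAPE hunk continues below, a client-side view of the scoping rules that TestAssertSessionRelation above exercises: a session token is bound to exactly one container and may optionally be narrowed to particular objects. A sketch using only SDK calls that appear in the tests (cidtest and oidtest are the same test helpers):

package main

import (
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)

func main() {
	var tok sessionSDK.Object

	tok.ForVerb(sessionSDK.VerbObjectGet) // the operation the token authorizes
	tok.BindContainer(cidtest.ID())       // mandatory container scope
	tok.LimitByObjects(oidtest.ID())      // optional: narrow to specific objects
}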
 func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
 	// APE check is ignored for some inter-node requests.
-	if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
+	switch prm.Role {
+	case nativeschema.PropertyValueContainerRoleContainer:
 		return nil
-	} else if prm.Role == nativeschema.PropertyValueContainerRoleIR {
+	case nativeschema.PropertyValueContainerRoleIR:
 		switch prm.Method {
 		case nativeschema.MethodGetObject,
 			nativeschema.MethodHeadObject,
diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
index 6e458b384..82e660a7f 100644
--- a/pkg/services/object/ape/errors.go
+++ b/pkg/services/object/ape/errors.go
@@ -7,6 +7,21 @@ import (
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 )
 
+var (
+	errMissingContainerID      = malformedRequestError("missing container ID")
+	errEmptyVerificationHeader = malformedRequestError("empty verification header")
+	errEmptyBodySig            = malformedRequestError("empty body signature")
+	errInvalidSessionSig       = malformedRequestError("invalid session token signature")
+	errInvalidSessionOwner     = malformedRequestError("invalid session token owner")
+	errInvalidVerb             = malformedRequestError("session token verb is invalid")
+)
+
+func malformedRequestError(reason string) error {
+	invalidArgErr := &apistatus.InvalidArgument{}
+	invalidArgErr.SetMessage(reason)
+	return invalidArgErr
+}
+
 func toStatusErr(err error) error {
 	var chRouterErr *checkercore.ChainRouterError
 	if !errors.As(err, &chRouterErr) {
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
new file mode 100644
index 000000000..b37c3b6f8
--- /dev/null
+++ b/pkg/services/object/ape/metadata.go
@@ -0,0 +1,172 @@
+package ape
+
+import (
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+type Metadata struct {
+	Container          cid.ID
+	Object             *oid.ID
+	MetaHeader         *session.RequestMetaHeader
+	VerificationHeader *session.RequestVerificationHeader
+	SessionToken       *sessionSDK.Object
+	BearerToken        *bearer.Token
+}
+
+func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+	if m.VerificationHeader == nil {
+		return nil, nil, errEmptyVerificationHeader
+	}
+
+	if m.BearerToken != nil && m.BearerToken.Impersonate() {
+		return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
+	}
+
+	// if a session token is present, use it as the source of truth
+	if m.SessionToken != nil {
+		// verify signature of session token
+		return ownerFromToken(m.SessionToken)
+	}
+
+	// otherwise get original body signature
+	bodySignature := originalBodySignature(m.VerificationHeader)
+	if bodySignature == nil {
+		return nil, nil, errEmptyBodySig
+	}
+
+	return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+// RequestInfo contains request information extracted from request metadata.
+type RequestInfo struct {
+	// Role defines the role under which this request is executed.
+	// It must be one of the role constants defined in the native schema.
+	Role string
+
+	ContainerOwner user.ID
+
+	// Namespace defines which namespace the container belongs to.
+	Namespace string
+
+	// Hex-encoded sender key.
+	SenderKey string
+}
+
+type RequestInfoExtractor interface {
+	GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
+}
+
+type extractor struct {
+	containers container.Source
+
+	nm netmap.Source
+
+	classifier objectCore.SenderClassifier
+}
+
+func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
+	return &extractor{
+		containers: containers,
+		nm:         nm,
+		classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
+	}
+}
+
+func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
+	currentEpoch, err := e.nm.Epoch(ctx)
+	if err != nil {
+		return errors.New("can't fetch current epoch")
+	}
+	if sessionToken.ExpiredAt(currentEpoch) {
+		return new(apistatus.SessionTokenExpired)
+	}
+	if sessionToken.InvalidAt(currentEpoch) {
+		return fmt.Errorf("malformed request: token is invalid at %d epoch", currentEpoch)
+	}
+	if !assertVerb(*sessionToken, method) {
+		return errInvalidVerb
+	}
+	return nil
+}
+
+func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
+	cnr, err := e.containers.Get(ctx, m.Container)
+	if err != nil {
+		return ri, err
+	}
+
+	if m.SessionToken != nil {
+		if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
+			return ri, err
+		}
+	}
+
+	ownerID, ownerKey, err := m.RequestOwner()
+	if err != nil {
+		return ri, err
+	}
+	res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
+	if err != nil {
+		return ri, err
+	}
+
+	ri.Role = nativeSchemaRole(res.Role)
+	ri.ContainerOwner = cnr.Value.Owner()
+
+	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+	if hasNamespace {
+		ri.Namespace = cnrNamespace
+	}
+
+	// the key is assumed to be valid at this point,
+	// otherwise the request would not have passed validation
+	ri.SenderKey = hex.EncodeToString(res.Key)
+
+	return ri, nil
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+	var sTok *sessionSDK.Object
+
+	if tokV2 != nil {
+		sTok = new(sessionSDK.Object)
+
+		err := sTok.ReadFromV2(*tokV2)
+		if err != nil {
+			return nil, fmt.Errorf("invalid session token: %w", err)
+		}
+
+		if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+			// if the session relates to the object's removal, we don't check
+			// the relation of the tombstone to the session here, since the user
+			// can't predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil) + } else { + err = assertSessionRelation(*sTok, cnr, obj) + } + + if err != nil { + return nil, err + } + } + + return sTok, nil +} diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/ape/metadata_test.go similarity index 83% rename from pkg/services/object/acl/v2/request_test.go rename to pkg/services/object/ape/metadata_test.go index 618af3469..fd919008f 100644 --- a/pkg/services/object/acl/v2/request_test.go +++ b/pkg/services/object/ape/metadata_test.go @@ -1,4 +1,4 @@ -package v2 +package ape import ( "testing" @@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) { vh.SetBodySignature(&userSignature) t.Run("empty verification header", func(t *testing.T) { - req := MetaWithToken{} + req := Metadata{} checkOwner(t, req, nil, errEmptyVerificationHeader) }) t.Run("empty verification header signature", func(t *testing.T) { - req := MetaWithToken{ - vheader: new(sessionV2.RequestVerificationHeader), + req := Metadata{ + VerificationHeader: new(sessionV2.RequestVerificationHeader), } checkOwner(t, req, nil, errEmptyBodySig) }) t.Run("no tokens", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, + req := Metadata{ + VerificationHeader: vh, } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, false), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, false), } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) { pk, err := keys.NewPrivateKey() require.NoError(t, err) - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), - token: newSession(t, pk), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), + SessionToken: newSession(t, pk), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) t.Run("with session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - token: newSession(t, containerOwner), + req := Metadata{ + VerificationHeader: vh, + SessionToken: newSession(t, containerOwner), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) { var tok sessionSDK.Object require.NoError(t, tok.ReadFromV2(tokV2)) - req := MetaWithToken{ - vheader: vh, - token: &tok, + req := Metadata{ + VerificationHeader: vh, + SessionToken: &tok, } checkOwner(t, req, nil, errInvalidSessionOwner) }) @@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool return &tok } -func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) { +func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) { _, actual, err := req.RequestOwner() if expectedErr != nil { require.ErrorIs(t, err, expectedErr) diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index d9594a3fc..e199e2638 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -2,9 +2,6 @@ 
package ape import ( "context" - "encoding/hex" - "errors" - "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" @@ -12,19 +9,18 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" ) -var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") - type Service struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.ServiceServer } @@ -64,9 +60,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service { +func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { return &Service{ apeChecker: apeChecker, + extractor: extractor, next: next, } } @@ -76,15 +73,9 @@ type getStreamBasicChecker struct { apeChecker Checker - namespace string + metadata Metadata - senderKey []byte - - containerOwner user.ID - - role string - - bearerToken *bearer.Token + reqInfo RequestInfo } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { @@ -95,15 +86,15 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.namespace, + Namespace: g.reqInfo.Namespace, Container: cnrID, Object: objID, Header: partInit.GetHeader(), Method: nativeschema.MethodGetObject, - SenderKey: hex.EncodeToString(g.senderKey), - ContainerOwner: g.containerOwner, - Role: g.role, - BearerToken: g.bearerToken, + SenderKey: g.reqInfo.SenderKey, + ContainerOwner: g.reqInfo.ContainerOwner, + Role: g.reqInfo.Role, + BearerToken: g.metadata.BearerToken, XHeaders: resp.GetMetaHeader().GetXHeaders(), } @@ -114,69 +105,53 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { return g.GetObjectStream.Send(resp) } -func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) { - untyped := ctx.Value(objectSvc.RequestContextKey) - if untyped == nil { - return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey) - } - rc, ok := untyped.(*objectSvc.RequestContext) - if !ok { - return nil, errFailedToCastToRequestContext - } - return rc, nil -} - func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) + if err != nil { + return err } - return c.next.Get(request, &getStreamBasicChecker{ GetObjectStream: stream, apeChecker: c.apeChecker, - namespace: reqCtx.Namespace, - senderKey: reqCtx.SenderKey, - containerOwner: reqCtx.ContainerOwner, - role: nativeSchemaRole(reqCtx.Role), - bearerToken: 
reqCtx.BearerToken, + metadata: md, + reqInfo: reqInfo, }) } type putStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PutObjectStream } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Header: partInit.GetHeader(), Method: nativeschema.MethodPutObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -196,6 +171,7 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { return &putStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } @@ -203,40 +179,36 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { type patchStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PatchObjectStream nonFirstSend bool } func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - if !p.nonFirstSend { p.nonFirstSend = true - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodPatchObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -256,22 +228,17 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, 
error return &patchStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject) if err != nil { return nil, err } @@ -285,7 +252,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj switch headerPart := resp.GetBody().GetHeaderPart().(type) { case *objectV2.ShortHeader: cidV2 := new(refs.ContainerID) - cnrID.WriteToV2(cidV2) + md.Container.WriteToV2(cidV2) header.SetContainerID(cidV2) header.SetVersion(headerPart.GetVersion()) header.SetCreationEpoch(headerPart.GetCreationEpoch()) @@ -301,16 +268,16 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Header: header, Method: nativeschema.MethodHeadObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -319,32 +286,24 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - var cnrID cid.ID - if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { - if err := cnrID.ReadFromV2(*cnrV2); err != nil { - return toStatusErr(err) - } - } - - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetContainerID(), nil) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject) + if err != nil { + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, + Namespace: reqInfo.Namespace, + Container: md.Container, Method: nativeschema.MethodSearchObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -354,31 +313,25 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx 
context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject) if err != nil { return nil, err } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodDeleteObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -393,31 +346,25 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - reqCtx, err := requestContext(stream.Context()) + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) if err != nil { - return toStatusErr(err) + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodRangeObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -427,31 +374,25 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := 
requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodHashObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -466,32 +407,26 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Header: request.GetBody().GetObject().GetHeader(), Method: nativeschema.MethodPutObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -501,18 +436,36 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ return c.next.PutSingle(ctx, request) } -func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return - } +type request interface { + GetMetaHeader() *session.RequestMetaHeader + GetVerificationHeader() *session.RequestVerificationHeader +} + +func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin } - if objV2 != nil { - objID = new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } + cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2) + if err != nil { + return + } + session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) + if err != nil { + return + } + bearer, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return + } + + md = Metadata{ + Container: cnrID, + Object: objID, + VerificationHeader: request.GetVerificationHeader(), + SessionToken: session, + BearerToken: bearer, } return } diff --git 
a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go index 46e55360d..97dbfa658 100644 --- a/pkg/services/object/ape/types.go +++ b/pkg/services/object/ape/types.go @@ -7,3 +7,11 @@ import "context" type Checker interface { CheckAPE(context.Context, Prm) error } + +// InnerRingFetcher is an interface that must provide +// Inner Ring information. +type InnerRingFetcher interface { + // InnerRingKeys must return list of public keys of + // the actual inner ring. + InnerRingKeys(ctx context.Context) ([][]byte, error) +} diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/ape/util.go similarity index 58% rename from pkg/services/object/acl/v2/util.go rename to pkg/services/object/ape/util.go index e02f70771..5cd2caa50 100644 --- a/pkg/services/object/acl/v2/util.go +++ b/pkg/services/object/ape/util.go @@ -1,4 +1,4 @@ -package v2 +package ape import ( "crypto/ecdsa" @@ -6,57 +6,34 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) -var errMissingContainerID = errors.New("missing container ID") - -func getContainerIDFromRequest(req any) (cid.ID, error) { - var idV2 *refsV2.ContainerID - var id cid.ID - - switch v := req.(type) { - case *objectV2.GetRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutRequest: - part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit) - if !ok { - return cid.ID{}, errors.New("can't get container ID in chunk") +func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { + if cidV2 != nil { + if err = cnrID.ReadFromV2(*cidV2); err != nil { + return } - - idV2 = part.GetHeader().GetContainerID() - case *objectV2.HeadRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.SearchRequest: - idV2 = v.GetBody().GetContainerID() - case *objectV2.DeleteRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeHashRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutSingleRequest: - idV2 = v.GetBody().GetObject().GetHeader().GetContainerID() - case *objectV2.PatchRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - default: - return cid.ID{}, errors.New("unknown request type") + } else { + err = errMissingContainerID + return } - if idV2 == nil { - return cid.ID{}, errMissingContainerID + if objV2 != nil { + objID = new(oid.ID) + if err = objID.ReadFromV2(*objV2); err != nil { + return + } } - - return id, id.ReadFromV2(*idV2) + return } // originalBearerToken goes down to original request meta header and fetches @@ -75,50 +52,6 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er return &tok, tok.ReadFromV2(*tokV2) } -// originalSessionToken goes down to original request meta header and fetches 
-// session token from there. -func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) { - for header.GetOrigin() != nil { - header = header.GetOrigin() - } - - tokV2 := header.GetSessionToken() - if tokV2 == nil { - return nil, nil - } - - var tok sessionSDK.Object - - err := tok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - return &tok, nil -} - -// getObjectIDFromRequestBody decodes oid.ID from the common interface of the -// object reference's holders. Returns an error if object ID is missing in the request. -func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) { - idV2 := body.GetAddress().GetObjectID() - return getObjectIDFromRefObjectID(idV2) -} - -func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) { - if idV2 == nil { - return nil, errors.New("missing object ID") - } - - var id oid.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, err - } - - return &id, nil -} - func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { // 1. First check signature of session token. if !token.VerifySignature() { @@ -172,16 +105,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -// assertVerb checks that token verb corresponds to op. -func assertVerb(tok sessionSDK.Object, op acl.Op) bool { - switch op { - case acl.OpObjectPut: +// assertVerb checks that token verb corresponds to the method. +func assertVerb(tok sessionSDK.Object, method string) bool { + switch method { + case nativeschema.MethodPutObject: return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) - case acl.OpObjectDelete: + case nativeschema.MethodDeleteObject: return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case acl.OpObjectGet: + case nativeschema.MethodGetObject: return tok.AssertVerb(sessionSDK.VerbObjectGet) - case acl.OpObjectHead: + case nativeschema.MethodHeadObject: return tok.AssertVerb( sessionSDK.VerbObjectHead, sessionSDK.VerbObjectGet, @@ -190,14 +123,15 @@ func assertVerb(tok sessionSDK.Object, op acl.Op) bool { sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch, ) - case acl.OpObjectSearch: + case nativeschema.MethodSearchObject: return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case acl.OpObjectRange: + case nativeschema.MethodRangeObject: return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) - case acl.OpObjectHash: + case nativeschema.MethodHashObject: return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) + case nativeschema.MethodPatchObject: + return tok.AssertVerb(sessionSDK.VerbObjectPatch) } - return false } @@ -221,3 +155,15 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error return nil } + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid signature key: %w", err) + } + + var idSender user.ID + user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) + + return &idSender, key, nil +} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go new file mode 100644 index 000000000..916bce427 --- /dev/null +++ b/pkg/services/object/ape/util_test.go @@ -0,0 +1,84 @@ +package ape + +import ( + "slices" + "testing" + + cidtest 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/stretchr/testify/require" +) + +func TestIsVerbCompatible(t *testing.T) { + table := map[string][]sessionSDK.ObjectVerb{ + nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, + nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, + nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, + nativeschema.MethodHeadObject: { + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectPatch, + }, + nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch}, + nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, + nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, + nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, + } + + verbs := []sessionSDK.ObjectVerb{ + sessionSDK.VerbObjectPut, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectSearch, + sessionSDK.VerbObjectPatch, + } + + var tok sessionSDK.Object + + for op, list := range table { + for _, verb := range verbs { + contains := slices.Contains(list, verb) + + tok.ForVerb(verb) + + require.Equal(t, contains, assertVerb(tok, op), + "%v in token, %s executing", verb, op) + } + } +} + +func TestAssertSessionRelation(t *testing.T) { + var tok sessionSDK.Object + cnr := cidtest.ID() + cnrOther := cidtest.ID() + obj := oidtest.ID() + objOther := oidtest.ID() + + // make sure ids differ, otherwise test won't work correctly + require.False(t, cnrOther.Equals(cnr)) + require.False(t, objOther.Equals(obj)) + + // bind session to the container (required) + tok.BindContainer(cnr) + + // test container-global session + require.NoError(t, assertSessionRelation(tok, cnr, nil)) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnrOther, nil)) + require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) + + // limit the session to the particular object + tok.LimitByObjects(obj) + + // test fixed object session (here obj arg must be non-nil everywhere) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnr, &objOther)) +} diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index dde9f8fc0..f8ee089fe 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -163,7 +163,7 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error if err != nil { a.failed = true } - if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) @@ -224,7 +224,7 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e if err != nil { a.failed = true } - if 
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index b2ae79dbc..f2bd907db 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -89,10 +89,8 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme
 		if !ownerObj.Equals(ownerSession) {
 			return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
 		}
-	} else {
-		if !ownerObj.Equals(sessionInfo.Owner) {
-			return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
-		}
+	} else if !ownerObj.Equals(sessionInfo.Owner) {
+		return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
 	}
 
 	if prm.SignRequestPrivateKey == nil {
diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go
index dae168baf..6593d3ca0 100644
--- a/pkg/services/object/common/writer/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
 }
 
 func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
-	traverser, err := placement.NewTraverser(ctx, n.Traversal.Opts...)
+	traverser, err := placement.NewTraverser(ctx, n.Opts...)
 	if err != nil {
 		return fmt.Errorf("could not create object placement traverser: %w", err)
 	}
@@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
 	}
 
 	// perform additional container broadcast if needed
-	if n.Traversal.submitPrimaryPlacementFinish() {
+	if n.submitPrimaryPlacementFinish() {
 		err := n.ForEachNode(ctx, f)
 		if err != nil {
 			n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
@@ -79,11 +79,11 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 			continue
 		}
 
-		workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
+		isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey())
 
 		item := new(bool)
 		wg.Add(1)
-		if err := workerPool.Submit(func() {
+		go func() {
 			defer wg.Done()
 
 			err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
@@ -95,17 +95,13 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 
 			traverser.SubmitSuccess()
 			*item = true
-		}); err != nil {
-			wg.Done()
-			svcutil.LogWorkerPoolError(ctx, n.cfg.Logger, "PUT", err)
-			return true
-		}
+		}()
 
 		// Mark the container node as processed in order to exclude it
 		// in subsequent container broadcast. Note that we don't
 		// process this node during broadcast if primary placement
 		// on it failed.
-		n.Traversal.submitProcessed(addr, item)
+		n.submitProcessed(addr, item)
 	}
 
 	wg.Wait()
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index f7486eae7..fff58aca7 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -95,6 +95,10 @@ func (x errIncompletePut) Error() string {
 	return commonMsg
 }
 
+func (x errIncompletePut) Unwrap() error {
+	return x.singleErr
+}
+
 // WriteObject implements the transformer.ObjectWriter interface.
 func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
 	t.obj = obj
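
Note: with the Unwrap method added above, errors.Is and errors.As can see through errIncompletePut to its cause. A sketch (assuming singleErr is the wrapped error field, as the hunk suggests):

    err := error(errIncompletePut{singleErr: context.DeadlineExceeded})
    fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true once Unwrap is defined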
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 8f269ec21..26a53e315 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -149,17 +149,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
 			return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
 		}
 
-		completed := make(chan interface{})
-		if poolErr := e.Config.RemotePool.Submit(func() {
-			defer close(completed)
-			err = e.Relay(ctx, info, c)
-		}); poolErr != nil {
-			close(completed)
-			svcutil.LogWorkerPoolError(ctx, e.Config.Logger, "PUT", poolErr)
-			return poolErr
-		}
-		<-completed
-
+		err = e.Relay(ctx, info, c)
 		if err == nil {
 			return nil
 		}
@@ -343,21 +333,11 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
 }
 
 func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
-	var err error
 	localTarget := LocalTarget{
 		Storage:   e.Config.LocalStore,
 		Container: e.Container,
 	}
-	completed := make(chan interface{})
-	if poolErr := e.Config.LocalPool.Submit(func() {
-		defer close(completed)
-		err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
-	}); poolErr != nil {
-		close(completed)
-		return poolErr
-	}
-	<-completed
-	return err
+	return localTarget.WriteObject(ctx, obj, e.ObjectMeta)
 }
 
 func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
@@ -371,15 +351,5 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n
 		nodeInfo: clientNodeInfo,
 	}
 
-	var err error
-	completed := make(chan interface{})
-	if poolErr := e.Config.RemotePool.Submit(func() {
-		defer close(completed)
-		err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
-	}); poolErr != nil {
-		close(completed)
-		return poolErr
-	}
-	<-completed
-	return err
+	return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
 }
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
index b7764661f..d5eeddf21 100644
--- a/pkg/services/object/common/writer/ec_test.go
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -31,7 +31,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
 	"git.frostfs.info/TrueCloudLab/tzhash/tz"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/panjf2000/ants/v2"
 	"github.com/stretchr/testify/require"
 )
 
@@ -131,17 +130,13 @@ func TestECWriter(t *testing.T) {
 	nodeKey, err := keys.NewPrivateKey()
 	require.NoError(t, err)
 
-	pool, err := ants.NewPool(4, ants.WithNonblocking(true))
-	require.NoError(t, err)
-
-	log, err := logger.NewLogger(nil)
+	log, err := logger.NewLogger(logger.Prm{})
 	require.NoError(t, err)
 
 	var n nmKeys
 	ecw := ECWriter{
 		Config: &Config{
 			NetmapKeys:        n,
-			RemotePool:        pool,
 			Logger:            log,
 			ClientConstructor: clientConstructor{vectors: ns},
 			KeyStorage:        util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index adaf1945b..d3d2b41b4 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -12,7 +12,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
 	objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -52,8 +51,6 @@ type Config struct {
 
 	NetmapSource netmap.Source
 
-	RemotePool, LocalPool util.WorkerPool
-
 	NetmapKeys netmap.AnnouncedKeys
 
 	FormatValidator *object.FormatValidator
@@ -69,12 +66,6 @@ type Config struct {
 
 type Option func(*Config)
 
-func WithWorkerPools(remote, local util.WorkerPool) Option {
-	return func(c *Config) {
-		c.RemotePool, c.LocalPool = remote, local
-	}
-}
-
 func WithLogger(l *logger.Logger) Option {
 	return func(c *Config) {
 		c.Logger = l
@@ -87,13 +78,6 @@ func WithVerifySessionTokenIssuer(v bool) Option {
 	}
 }
 
-func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
-	if c.NetmapKeys.IsLocalKey(pub) {
-		return c.LocalPool, true
-	}
-	return c.RemotePool, false
-}
-
 type Params struct {
 	Config *Config
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index 36a17bde2..a99ba3586 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
 	for i := range members {
 		for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
 			if members[i].Equals(incoming[j]) {
-				incoming = append(incoming[:j], incoming[j+1:]...)
+				incoming = slices.Delete(incoming, j, j+1)
 				j--
 			}
 		}
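
Note: the append-based deletion idiom and slices.Delete (standard library since Go 1.21) are equivalent here; the helper just states the intent. For example:

    s := []int{10, 20, 30, 40}
    s = slices.Delete(s, 1, 2) // drops s[1]; same result as append(s[:1], s[2:]...)
    fmt.Println(s)             // [10 30 40]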
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 867d3f4ef..1c4d7d585 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -92,6 +92,6 @@ func New(gs *getsvc.Service,
 // WithLogger returns option to specify Delete service's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = l.With(zap.String("component", "objectSDK.Delete service"))
+		c.log = l
 	}
 }
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index e164627d2..e80132489 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
 
 	detachedExecutor.execute(ctx)
 
-	return detachedExecutor.statusError.err
+	return detachedExecutor.err
 }
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 0ee8aed53..dfb31133c 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -28,16 +28,7 @@ func (r *request) executeOnContainer(ctx context.Context) {
 
 	localStatus := r.status
 
-	for {
-		if r.processCurrentEpoch(ctx, localStatus) {
-			break
-		}
-
-		// check the maximum depth has been reached
-		if lookupDepth == 0 {
-			break
-		}
-
+	for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 {
 		lookupDepth--
 
 		// go to the previous epoch
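
Note: the rewritten loop preserves the original control flow: keep iterating while the current epoch was not processed successfully and the depth budget is not exhausted. A stand-alone sketch of the same condition (stub function, illustrative only):

    lookupDepth := 2
    processCurrentEpoch := func() bool { return false } // stub: nothing found this epoch
    for !processCurrentEpoch() && lookupDepth != 0 {
        lookupDepth-- // then step to the previous epoch, as in the hunk
    }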
 			return
 		}
 
 		if execCnr {
-			exec.executeOnContainer(ctx)
-			exec.analyzeStatus(ctx, false)
+			r.executeOnContainer(ctx)
+			r.analyzeStatus(ctx, false)
 		}
 	}
 }
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index 9ec10b5f2..a103f5a7f 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -53,6 +53,6 @@ func New(
 // WithLogger returns option to specify Get service's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(s *Service) {
-		s.log = l.With(zap.String("component", "Object.Get service"))
+		s.log = l
 	}
 }
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index fc483b74b..0ec8912fd 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
 
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = l.With(zap.String("component", "Object.Get V2 service"))
+		c.log = l
 	}
 }
diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go
index 98207336c..0d73bcd4d 100644
--- a/pkg/services/object/get/v2/streamer.go
+++ b/pkg/services/object/get/v2/streamer.go
@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
 	p.SetHeader(objV2.GetHeader())
 	p.SetSignature(objV2.GetSignature())
 
-	return s.GetObjectStream.Send(newResponse(p))
+	return s.Send(newResponse(p))
 }
 
 func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
 	p := new(objectV2.GetObjectPartChunk)
 	p.SetChunk(chunk)
 
-	return s.GetObjectStream.Send(newResponse(p))
+	return s.Send(newResponse(p))
 }
 
 func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
 }
 
 func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
-	return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
+	return s.Send(newRangeResponse(chunk))
 }
 
 func newRangeResponse(p []byte) *objectV2.GetRangeResponse {
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index bfa7fd619..e699a3779 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -3,6 +3,7 @@ package getsvc
 import (
 	"context"
 	"crypto/sha256"
+	"errors"
 	"hash"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -182,9 +183,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran
 	default:
 		return nil, errUnknownChechsumType(t)
 	case refs.SHA256:
-		p.SetHashGenerator(func() hash.Hash {
-			return sha256.New()
-		})
+		p.SetHashGenerator(sha256.New)
 	case refs.TillichZemor:
 		p.SetHashGenerator(func() hash.Hash {
 			return tz.New()
@@ -360,19 +359,20 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien
 		info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
 			var err error
-
-			defer func() {
-				stop = err == nil
-
-				if stop || firstErr == nil {
-					firstErr = err
-				}
-
-				// would be nice to log otherwise
-			}()
 
 			res, err = f(ctx, addr, c, key)
+			// non-status logic error that could be returned
+			// from the SDK client; should not be considered
+			// as a connection error
+			var siErr *objectSDK.SplitInfoError
+			var eiErr *objectSDK.ECInfoError
+
+			stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)
+
+			if stop || firstErr == nil {
+				firstErr = err
+			}
+
 			return
 		})
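
Note: in groupAddressRequestForwarder the deferred closure is replaced by straight-line logic, and split/EC info errors now also stop the address iteration: they are substantive answers from the node, not connection failures, so retrying the next endpoint would be pointless. The shape of the check, using the same SDK error types as the hunk:

    var siErr *objectSDK.SplitInfoError
    var eiErr *objectSDK.ECInfoError
    stop := err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr)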
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 19748e938..6a6ee0f0f 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
 )
@@ -34,7 +35,7 @@ type (
 	}
 
 	MetricRegister interface {
-		AddRequestDuration(string, time.Duration, bool)
+		AddRequestDuration(string, time.Duration, bool, string)
 		AddPayloadSize(string, int)
 	}
 )
@@ -51,7 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
 	if m.enabled {
 		t := time.Now()
 		defer func() {
-			m.metrics.AddRequestDuration("Get", time.Since(t), err == nil)
+			m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
 		}()
 		err = m.next.Get(req, &getStreamMetric{
 			ServerStream: stream,
@@ -106,7 +107,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl
 
 		res, err := m.next.PutSingle(ctx, request)
 
-		m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil)
+		m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
 		if err == nil {
 			m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload()))
 		}
@@ -122,7 +123,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest)
 
 		res, err := m.next.Head(ctx, request)
 
-		m.metrics.AddRequestDuration("Head", time.Since(t), err == nil)
+		m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
 
 		return res, err
 	}
@@ -135,7 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream)
 
 		err := m.next.Search(req, stream)
 
-		m.metrics.AddRequestDuration("Search", time.Since(t), err == nil)
+		m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
 
 		return err
 	}
@@ -148,7 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque
 
 		res, err := m.next.Delete(ctx, request)
 
-		m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil)
+		m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
 		return res, err
 	}
 	return m.next.Delete(ctx, request)
@@ -160,7 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR
 
 		err := m.next.GetRange(req, stream)
 
-		m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil)
+		m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context()))
 
 		return err
 	}
@@ -173,7 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa
 
 		res, err := m.next.GetRangeHash(ctx, request)
 
-		m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil)
+		m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx))
 
 		return res, err
 	}
@@ -209,7 +210,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error
 
 func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
 	res, err := s.stream.CloseAndRecv(ctx)
 
-	s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil)
+	s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
 
 	return res, err
 }
@@ -223,7 +224,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e
 
 func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
 	res, err := s.stream.CloseAndRecv(ctx)
 
-	s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil)
+	s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx))
 
 	return res, err
 }
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index 953f82b48..5d298bfed 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config,
 
 // Patch calls internal service and returns v2 object streamer.
 func (s *Service) Patch() (object.PatchObjectStream, error) {
-	nodeKey, err := s.Config.KeyStorage.GetKey(nil)
+	nodeKey, err := s.KeyStorage.GetKey(nil)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 5aba13f66..ff13b1d3e 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -195,7 +195,12 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
 	patch.FromV2(req.GetBody())
 
 	if !s.nonFirstSend {
-		err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
+		err := s.patcher.ApplyHeaderPatch(ctx,
+			patcher.ApplyHeaderPatchPrm{
+				NewSplitHeader:    patch.NewSplitHeader,
+				NewAttributes:     patch.NewAttributes,
+				ReplaceAttributes: patch.ReplaceAttributes,
+			})
 		if err != nil {
 			return fmt.Errorf("patch attributes: %w", err)
 		}
@@ -214,6 +219,9 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
 }
 
 func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+	if s.patcher == nil {
+		return nil, errors.New("uninitialized patch streamer")
+	}
 	patcherResp, err := s.patcher.Close(ctx)
 	if err != nil {
 		return nil, err
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index 5cc0a5722..7aeb5857d 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -6,7 +6,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
 	objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"go.uber.org/zap"
 )
@@ -27,8 +26,6 @@ func NewService(ks *objutil.KeyStorage,
 	opts ...objectwriter.Option,
 ) *Service {
 	c := &objectwriter.Config{
-		RemotePool:        util.NewPseudoWorkerPool(),
-		LocalPool:         util.NewPseudoWorkerPool(),
 		Logger:            logger.NewLoggerWrapper(zap.L()),
 		KeyStorage:        ks,
 		ClientConstructor: cc,
@@ -59,8 +56,8 @@ func NewService(ks *objutil.KeyStorage,
 	}
 }
 
-func (p *Service) Put() (*Streamer, error) {
+func (s *Service) Put() (*Streamer, error) {
 	return &Streamer{
-		Config: p.Config,
+		Config: s.Config,
 	}, nil
 }
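
Note: the nil check added to the patch streamer's CloseAndRecv closes a crash path: a client could open a Patch stream and close it without a single Send, leaving s.patcher uninitialized. A hypothetical sequence that used to nil-dereference and now fails cleanly:

    s := &Streamer{}              // no Send() was ever called, so s.patcher == nil
    _, err := s.CloseAndRecv(ctx) // now: "uninitialized patch streamer" error instead of a panic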
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index f3c2dca1a..90f473254 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -102,7 +102,7 @@ func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Obje
 		return target.ErrWrongPayloadSize
 	}
 
-	maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize(ctx)
+	maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
 	if obj.PayloadSize() > maxAllowedSize {
 		return target.ErrExceedingMaxSize
 	}
@@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
 }
 
 func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
-	iter := s.Config.NewNodeIterator(placement.placementOptions)
+	iter := s.NewNodeIterator(placement.placementOptions)
 	iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
 	iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
 
 	signer := &putSingleRequestSigner{
 		req:        req,
-		keyStorage: s.Config.KeyStorage,
+		keyStorage: s.KeyStorage,
 		signer:     &sync.Once{},
 	}
 
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
 	if err != nil {
 		return err
 	}
-	key, err := s.Config.KeyStorage.GetKey(nil)
+	key, err := s.KeyStorage.GetKey(nil)
 	if err != nil {
 		return err
 	}
 
 	signer := &putSingleRequestSigner{
 		req:        req,
-		keyStorage: s.Config.KeyStorage,
+		keyStorage: s.KeyStorage,
 		signer:     &sync.Once{},
 	}
 
@@ -225,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
 	if !ok {
 		return result, errors.New("missing container ID")
 	}
-	cnrInfo, err := s.Config.ContainerSource.Get(ctx, cnrID)
+	cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
 	if err != nil {
 		return result, fmt.Errorf("could not get container by ID: %w", err)
 	}
@@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
 	}
 	result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
 
-	latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.Config.NetmapSource)
+	latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
 	if err != nil {
 		return result, fmt.Errorf("could not get latest network map: %w", err)
 	}
 	builder := placement.NewNetworkMapBuilder(latestNetmap)
 	if localOnly {
 		result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
-		builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
+		builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
 	}
 	result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
 	return result, nil
@@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
 
 	client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
 
-	c, err := s.Config.ClientConstructor.Get(info)
+	c, err := s.ClientConstructor.Get(info)
 	if err != nil {
 		return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
 	}
@@ -283,7 +283,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
 
 func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
 	localTarget := &objectwriter.LocalTarget{
-		Storage:   s.Config.LocalStore,
+		Storage:   s.LocalStore,
 		Container: container,
 	}
 	return localTarget.WriteObject(ctx, obj, meta)
@@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
 		if err != nil {
 			objID, _ := obj.ID()
 			cnrID, _ := obj.ContainerID()
-			s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+			s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
 				zap.Error(err),
 				zap.Stringer("address", addr),
 				zap.Stringer("object_id", objID),
diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go
new file mode 100644
index 000000000..01eb1ea8d
--- /dev/null
+++ b/pkg/services/object/qos.go
@@ -0,0 +1,145 @@
+package object
+
+import (
+	"context"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+)
+
+var _ ServiceServer = (*qosObjectService)(nil)
+
+type AdjustIOTag interface {
+	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type qosObjectService struct {
+	next ServiceServer
+	adj  AdjustIOTag
+}
+
+func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer {
+	return &qosObjectService{
+		next: next,
+		adj:  adjIOTag,
+	}
+}
+
+func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) {
+	ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.Delete(ctx, req)
+}
+
+func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error {
+	ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.Get(req, &qosReadStream[*object.GetResponse]{
+		ctxF:   func() context.Context { return ctx },
+		sender: s,
+	})
+}
+
+func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error {
+	ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{
+		ctxF:   func() context.Context { return ctx },
+		sender: s,
+	})
+}
+
+func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) {
+	ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.GetRangeHash(ctx, req)
+}
+
+func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
+	ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.Head(ctx, req)
+}
+
+func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) {
+	s, err := q.next.Patch(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{
+		s:   s,
+		adj: q.adj,
+	}, nil
+}
+
+func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) {
+	s, err := q.next.Put(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &qosWriteStream[*object.PutRequest, *object.PutResponse]{
+		s:   s,
+		adj: q.adj,
+	}, nil
+}
+
+func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
+	ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.PutSingle(ctx, req)
+}
+
+func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error {
+	ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey())
+	return q.next.Search(req, &qosReadStream[*object.SearchResponse]{
+		ctxF:   func() context.Context { return ctx },
+		sender: s,
+	})
+}
+
+type qosSend[T any] interface {
+	Send(T) error
+}
+
+type qosReadStream[T any] struct {
+	sender qosSend[T]
+	ctxF   func() context.Context
+}
+
+func (g *qosReadStream[T]) Context() context.Context {
+	return g.ctxF()
+}
+
+func (g *qosReadStream[T]) Send(resp T) error {
+	return g.sender.Send(resp)
+}
+
+type qosVerificationHeader interface {
+	GetVerificationHeader() *session.RequestVerificationHeader
+}
+
+type qosSendRecv[TReq qosVerificationHeader, TResp any] interface {
+	Send(context.Context, TReq) error
+	CloseAndRecv(context.Context) (TResp, error)
+}
+
+type qosWriteStream[TReq qosVerificationHeader, TResp any] struct {
+	s   qosSendRecv[TReq, TResp]
+	adj AdjustIOTag
+
+	ioTag        string
+	ioTagDefined bool
+}
+
+func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) {
+	if q.ioTagDefined {
+		ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+	}
+	return q.s.CloseAndRecv(ctx)
+}
+
+func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error {
+	if !q.ioTagDefined {
+		ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey())
+		q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx)
+	}
+	assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment")
+	ctx = tagging.ContextWithIOTag(ctx, q.ioTag)
+	return q.s.Send(ctx, req)
+}
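
Note: the write-stream wrapper above resolves the IO tag once, on the first Send, and pins it for the rest of the stream (including CloseAndRecv), so a stream cannot change its tag mid-flight. The frostfs-qos tagging helpers it relies on are plain context accessors:

    ctx := tagging.ContextWithIOTag(context.Background(), "client")
    tag, ok := tagging.IOTagFromContext(ctx) // "client", true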
diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go
deleted file mode 100644
index eb4041f80..000000000
--- a/pkg/services/object/request_context.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package object
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-)
-
-type RequestContextKeyT struct{}
-
-var RequestContextKey = RequestContextKeyT{}
-
-// RequestContext is a context passed between middleware handlers.
-type RequestContext struct {
-	Namespace string
-
-	SenderKey []byte
-
-	ContainerOwner user.ID
-
-	Role acl.Role
-
-	BearerToken *bearer.Token
-}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index e1aeca957..56fe56468 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -94,6 +94,6 @@ func New(e *engine.StorageEngine,
 // WithLogger returns option to specify Get service's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = l.With(zap.String("component", "Object.Search service"))
+		c.log = l
 	}
 }
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index fed168187..0be5345b9 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -2,6 +2,7 @@ package searchsvc
 
 import (
 	"context"
+	"slices"
 	"sync"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -53,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
 		}
 
 		// exclude processed address
-		list = append(list[:i], list[i+1:]...)
+ list = slices.Delete(list, i, i+1) i-- } diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index 2b44227a5..fd8e926dd 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -96,7 +96,8 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PutResponse) } } @@ -132,7 +133,8 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PatchResponse) } } diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index 2c1e053ac..b10826226 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -17,11 +17,3 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net zap.Error(err), ) } - -// LogWorkerPoolError writes debug error message of object worker pool to provided logger. -func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) { - l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool, - zap.String("request", req), - zap.Error(err), - ) -} diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go index 195944f92..f74b0aab9 100644 --- a/pkg/services/object/util/placement.go +++ b/pkg/services/object/util/placement.go @@ -3,6 +3,7 @@ package util import ( "context" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -93,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o } if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - vs[i] = append(vs[i][:j], vs[i][j+1:]...) 
+			vs[i] = slices.Delete(vs[i], j, j+1)
 			j--
 		}
 	}
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go
index 45e6df339..0f24a9d96 100644
--- a/pkg/services/object_manager/placement/metrics.go
+++ b/pkg/services/object_manager/placement/metrics.go
@@ -2,24 +2,90 @@ package placement
 
 import (
 	"errors"
+	"fmt"
+	"maps"
+	"math"
 	"strings"
+	"sync"
+	"sync/atomic"
 
+	locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+	locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 )
 
 const (
 	attrPrefix = "$attribute:"
+
+	geoDistance = "$geoDistance"
 )
 
 type Metric interface {
 	CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int
 }
 
-func ParseMetric(raw string) (Metric, error) {
-	if attr, found := strings.CutPrefix(raw, attrPrefix); found {
-		return NewAttributeMetric(attr), nil
+type metricsParser struct {
+	locodeDBPath string
+	locodes      map[string]locodedb.Point
+}
+
+type MetricParser interface {
+	ParseMetrics([]string) ([]Metric, error)
+}
+
+func NewMetricsParser(locodeDBPath string) (MetricParser, error) {
+	return &metricsParser{
+		locodeDBPath: locodeDBPath,
+	}, nil
+}
+
+func (p *metricsParser) initLocodes() error {
+	if len(p.locodes) != 0 {
+		return nil
 	}
-	return nil, errors.New("unsupported priority metric")
+	if len(p.locodeDBPath) > 0 {
+		p.locodes = make(map[string]locodedb.Point)
+		locodeDB := locodebolt.New(locodebolt.Prm{
+			Path: p.locodeDBPath,
+		},
+			locodebolt.ReadOnly(),
+		)
+		err := locodeDB.Open()
+		if err != nil {
+			return err
+		}
+		defer locodeDB.Close()
+		err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) {
+			p.locodes[k] = v
+		})
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+	return errors.New("set path to locode database")
+}
+
+func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) {
+	var metrics []Metric
+	for _, raw := range priority {
+		if attr, found := strings.CutPrefix(raw, attrPrefix); found {
+			metrics = append(metrics, NewAttributeMetric(attr))
+		} else if raw == geoDistance {
+			err := p.initLocodes()
+			if err != nil {
+				return nil, err
+			}
+			if len(p.locodes) == 0 {
+				return nil, fmt.Errorf("provide locodes database for metric %s", raw)
+			}
+			m := NewGeoDistanceMetric(p.locodes)
+			metrics = append(metrics, m)
+		} else {
+			return nil, fmt.Errorf("unsupported priority metric %s", raw)
+		}
+	}
+	return metrics, nil
 }
 
 // attributeMetric describes priority metric based on attribute.
@@ -41,3 +107,79 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node
 func NewAttributeMetric(attr string) Metric {
 	return &attributeMetric{attribute: attr}
 }
+
+// geoDistanceMetric describes priority metric based on geographic distance.
+type geoDistanceMetric struct {
+	locodes  map[string]locodedb.Point
+	distance *atomic.Pointer[map[string]int]
+	mtx      sync.Mutex
+}
+
+func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
+	d := atomic.Pointer[map[string]int]{}
+	m := make(map[string]int)
+	d.Store(&m)
+	gm := &geoDistanceMetric{
+		locodes:  locodes,
+		distance: &d,
+	}
+	return gm
+}
+
+// CalculateValue returns the distance in kilometers between the current node
+// and the provided one, if coordinates for the provided node are found.
+// Otherwise it returns math.MaxInt.
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+	fl := from.LOCODE()
+	tl := to.LOCODE()
+	if fl == tl {
+		return 0
+	}
+	m := gm.distance.Load()
+	if v, ok := (*m)[fl+tl]; ok {
+		return v
+	}
+	return gm.calculateDistance(fl, tl)
+}
+
+func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
+	gm.mtx.Lock()
+	defer gm.mtx.Unlock()
+	od := gm.distance.Load()
+	if v, ok := (*od)[from+to]; ok {
+		return v
+	}
+	nd := maps.Clone(*od)
+	var dist int
+	pointFrom, okFrom := gm.locodes[from]
+	pointTo, okTo := gm.locodes[to]
+	if okFrom && okTo {
+		dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
+	} else {
+		dist = math.MaxInt
+	}
+	nd[from+to] = dist
+	gm.distance.Store(&nd)
+
+	return dist
+}
+
+// distance returns the number of kilometers between two points.
+// Parameters are latitude and longitude of point 1 and 2 in decimal degrees.
+// Original implementation can be found here https://www.geodatasource.com/developers/go.
+func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
+	radLat1 := math.Pi * lt1 / 180
+	radLat2 := math.Pi * lt2 / 180
+	radTheta := math.Pi * (ln1 - ln2) / 180
+
+	dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
+
+	if dist > 1 {
+		dist = 1
+	}
+
+	dist = math.Acos(dist)
+	dist = dist * 180 / math.Pi
+	dist = dist * 60 * 1.1515 * 1.609344
+
+	return dist
+}
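
Note: the distance helper is the spherical law of cosines with two conversion factors: 60 * 1.1515 turns degrees of arc into statute miles, and 1.609344 turns miles into kilometers. A rough usage sketch (coordinates are illustrative; Moscow to Saint Petersburg):

    km := distance(55.75, 37.62, 59.94, 30.31)
    fmt.Printf("%.0f km\n", km) // ≈ 637 with this formula; the geodesic is about 634 km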
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index efa4a5b06..a3f9af959 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -288,8 +288,8 @@ func (t *Traverser) Next() []Node {
 func (t *Traverser) skipEmptyVectors() {
 	for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
 		if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
-			t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
-			t.rem = append(t.rem[:i], t.rem[i+1:]...)
+			t.vectors = slices.Delete(t.vectors, i, i+1)
+			t.rem = slices.Delete(t.rem, i, i+1)
 			i--
 		} else {
 			break
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index 9c825bf19..d1370f21e 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -601,4 +601,53 @@ func TestTraverserPriorityMetrics(t *testing.T) {
 		next = tr.Next()
 		require.Nil(t, next)
 	})
+
+	t.Run("one rep one geo metric", func(t *testing.T) {
+		t.Skip()
+		selectors := []int{2}
+		replicas := []int{2}
+
+		nodes, cnr := testPlacement(selectors, replicas)
+
+		// Node_0, PK - ip4/0.0.0.0/tcp/0
+		nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW")
+		// Node_1, PK - ip4/0.0.0.0/tcp/1
+		nodes[0][1].SetAttribute("UN-LOCODE", "RU LED")
+
+		sdkNode := testNode(2)
+		sdkNode.SetAttribute("UN-LOCODE", "FI HEL")
+
+		nodesCopy := copyVectors(nodes)
+
+		parser, err := NewMetricsParser("/path/to/locode_db")
+		require.NoError(t, err)
+		m, err := parser.ParseMetrics([]string{geoDistance})
+		require.NoError(t, err)
+
+		tr, err := NewTraverser(context.Background(),
+			ForContainer(cnr),
+			UseBuilder(&testBuilder{
+				vectors: nodesCopy,
+			}),
+			WithoutSuccessTracking(),
+			WithPriorityMetrics(m),
+			WithNodeState(&nodeState{
+				node: &sdkNode,
+			}),
+		)
+		require.NoError(t, err)
+
+		// Without priority metric `$geoDistance` the order will be:
+		// [ {Node_0 RU MOW}, {Node_1 RU LED}]
+		// With priority metric `$geoDistance` the order should be:
+		// [ {Node_1 RU LED}, {Node_0 RU MOW}]
+		next := tr.Next()
+		require.NotNil(t, next)
+		require.Equal(t, 2, len(next))
+		require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+		require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+		next = tr.Next()
+		require.Nil(t, next)
+	})
 }
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index a4e36c2dc..e5f001d5a 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -61,10 +61,8 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
 			logs.TombstoneCouldNotGetTheTombstoneTheSource,
 			zap.Error(err),
 		)
-	} else {
-		if ts != nil {
-			return g.handleTS(ctx, addrStr, ts, epoch)
-		}
+	} else if ts != nil {
+		return g.handleTS(ctx, addrStr, ts, epoch)
 	}
 
 	// requested tombstone not
diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go
index 67ddf316f..2147a32fe 100644
--- a/pkg/services/object_manager/tombstone/constructor.go
+++ b/pkg/services/object_manager/tombstone/constructor.go
@@ -3,6 +3,7 @@ package tombstone
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	lru "github.com/hashicorp/golang-lru/v2"
 	"go.uber.org/zap"
@@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker {
 	panicOnNil(cfg.tsSource, "Tombstone source")
 
 	cache, err := lru.New[string, uint64](cfg.cacheSize)
-	if err != nil {
-		panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
-	}
+	assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize))
 
 	return &ExpirationChecker{
 		cache: cache,
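
Note: several constructors in this patch swap hand-rolled panics for internal/assert helpers. That package is not shown in the diff; assuming it keeps the panic-on-violation semantics the replaced code had, its shape would be roughly:

    // hypothetical sketch of the internal/assert helper; not part of this diff
    func NoError(err error, msg string) {
        if err != nil {
            panic(fmt.Sprintf("%s: %v", msg, err))
        }
    }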
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 1ff07b05a..975941847 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -38,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) {
 // Panics if any of the provided options does not allow
 // constructing a valid tombstone local Source.
 func NewSource(p TombstoneSourcePrm) Source {
-	if p.s == nil {
-		panic("Tombstone source: nil object service")
-	}
+	assert.False(p.s == nil, "Tombstone source: nil object service")
 
 	return Source(p)
 }
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 1ee31d480..fbdeb3148 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
 func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
 	var removeLocalChunk bool
 	requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
-	if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+	if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
 		// current node is required node, we are happy
 		return ecChunkProcessResult{
 			validPlacement: true,
@@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
 		if uint32(i) == objInfo.ECInfo.Total {
 			break
 		}
-		if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+		if p.netmapKeys.IsLocalKey(n.PublicKey()) {
 			requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
 		}
 	}
@@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
 func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
 	var eiErr *objectSDK.ECInfoError
 	for _, n := range nodes {
-		if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+		if p.netmapKeys.IsLocalKey(n.PublicKey()) {
 			continue
 		}
 		_, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
 			return
 		}
 		var err error
-		if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+		if p.netmapKeys.IsLocalKey(n.PublicKey()) {
 			_, err = p.localHeader(ctx, parentAddress)
 		} else {
 			_, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -283,7 +283,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
 			}
 		} else if client.IsErrObjectAlreadyRemoved(err) {
 			restore = false
-		} else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+		} else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
 			p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
 			p.replicator.HandleReplicationTask(ctx, replicator.Task{
 				NumCopies: 1,
@@ -343,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
 		pID, _ := part.ID()
 		addr.SetObject(pID)
 		targetNode := nodes[idx%len(nodes)]
-		if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+		if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
 			p.replicator.HandleLocalPutTask(ctx, replicator.Task{
 				Addr: addr,
 				Obj:  part,
@@ -371,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 	var obj *objectSDK.Object
 	var err error
 	for _, node := range nodes {
-		if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+		if p.netmapKeys.IsLocalKey(node.PublicKey()) {
 			obj, err = p.localObject(egCtx, objID)
 		} else {
 			obj, err = p.remoteObject(egCtx, node, objID)
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index 4e8bacfec..c91e7cc7c 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,12 +1,13 @@
 package policer
 
 import (
+	"fmt"
 	"sync"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	lru "github.com/hashicorp/golang-lru/v2"
-	"go.uber.org/zap"
 )
 
 type objectsInWork struct {
@@ -54,12 +55,8 @@ func New(opts ...Option) *Policer {
 		opts[i](c)
 	}
 
-	c.log = c.log.With(zap.String("component", "Object Policer"))
-
 	cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
-	if err != nil {
-		panic(err)
-	}
+	assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize))
 
 	return &Policer{
 		cfg: c,
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index cef4c36d9..049c33753 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"errors"
+	"slices"
 	"sort"
 	"testing"
 	"time"
@@ -226,10 +227,8 @@ func TestProcessObject(t *testing.T) {
 					return nil, err
 				}
 			}
-			for _, i := range ti.objHolders {
-				if index == i {
-					return nil, nil
-				}
+			if slices.Contains(ti.objHolders, index) {
+				return nil, nil
 			}
 			return nil, new(apistatus.ObjectNotFound)
 		}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index bd830d04e..635a5683b 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -7,7 +7,9 @@ import (
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"go.uber.org/zap"
 )
@@ -18,6 +20,7 @@ func (p *Policer) Run(ctx context.Context) {
 }
 
 func (p *Policer) shardPolicyWorker(ctx context.Context) {
+	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
 	for {
 		select {
 		case <-ctx.Done():
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index bb38c72ad..216fe4919 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -3,6 +3,7 @@ package replicator
 import (
 	"context"
 	"errors"
+	"slices"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
@@ -42,11 +43,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
 		if err == nil {
 			break
 		}
-		var endpoints []string
-		node.IterateNetworkEndpoints(func(s string) bool {
-			endpoints = append(endpoints, s)
-			return false
-		})
+		endpoints := slices.Collect(node.NetworkEndpoints())
 		p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
 			zap.Stringer("object", task.Addr),
 			zap.Error(err),
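
Note: the pull-task change assumes the SDK's node.NetworkEndpoints() now returns a Go 1.23 iterator (iter.Seq[string]), which slices.Collect gathers into a slice in one call. An equivalent toy example with a hand-written sequence:

    seq := func(yield func(string) bool) { // stand-in for node.NetworkEndpoints()
        _ = yield("/ip4/127.0.0.1/tcp/8080") && yield("/ip4/127.0.0.1/tcp/8081")
    }
    endpoints := slices.Collect(iter.Seq[string](seq)) // []string with both addresses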
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index 6910fa5af..a940cef37 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -7,7 +7,6 @@ import (
 	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
 	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"go.uber.org/zap"
 )
 
 // Replicator represents the utility that replicates
@@ -45,8 +44,6 @@ func New(opts ...Option) *Replicator {
 		opts[i](c)
 	}
 
-	c.log = c.log.With(zap.String("component", "Object Replicator"))
-
 	return &Replicator{
 		cfg: c,
 	}
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index 12b221613..f0591de71 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -33,10 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log
 }
 
 func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
-	s.log.Debug(ctx, logs.ServingRequest,
-		zap.String("component", "SessionService"),
-		zap.String("request", "Create"),
-	)
+	s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create"))
 
 	respBody, err := s.exec.Create(ctx, req.GetBody())
 	if err != nil {
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index d312ea0ea..132d62445 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
 	// enable encryption if it
 	// was configured so
 	if cfg.privateKey != nil {
-		rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
+		rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
 		cfg.privateKey.D.FillBytes(rawKey)
 
 		c, err := aes.NewCipher(rawKey)
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index c4b03cbe6..58757ff6d 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -22,7 +22,7 @@ import (
 )
 
 func (s *Service) newAPERequest(ctx context.Context, namespace string,
-	cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+	cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
 ) (aperequest.Request, error) {
 	schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
 	if err != nil {
@@ -53,15 +53,19 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
 		resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
 	}
 
+	resProps := map[string]string{
+		nativeschema.ProperyKeyTreeID: treeID,
+	}
+
 	return aperequest.NewRequest(
 		schemaMethod,
-		aperequest.NewResource(resourceName, make(map[string]string)),
+		aperequest.NewResource(resourceName, resProps),
 		reqProps,
 	), nil
 }
 
 func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
-	container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
+	container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
 ) error {
 	namespace := ""
 	cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
@@ -69,7 +73,7 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
 		namespace = cntNamespace
 	}
 
-	request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey)
+	request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey)
 	if err != nil {
 		return fmt.Errorf("failed to create ape request: %w", err)
 	}
diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go
index 0afc7660a..7b209fd47 100644
--- a/pkg/services/tree/ape_test.go
+++ b/pkg/services/tree/ape_test.go
@@ -107,6 +107,45 @@ func TestCheckAPE(t *testing.T) {
 	cid := cid.ID{}
 	_ = cid.DecodeString(containerID)
 
+	t.Run("treeID rule", func(t *testing.T) {
+		los := inmemory.NewInmemoryLocalStorage()
+		mcs := inmemory.NewInmemoryMorphRuleChainStorage()
+		fid := newFrostfsIDProviderMock(t)
+		s := Service{
+			cfg: cfg{
+				frostfsidSubjectProvider: fid,
+			},
+			apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
+		}
+
+		mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
+			Rules: []chain.Rule{
+				{
+					Status:  chain.QuotaLimitReached,
+					Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
+					Resources: chain.Resources{
+						Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
+					},
+					Condition: []chain.Condition{
+						{
+							Op:    chain.CondStringEquals,
+							Kind:  chain.KindResource,
+							Key:   nativeschema.ProperyKeyTreeID,
+							Value: versionTreeID,
+						},
+					},
+				},
+			},
+			MatchType: chain.MatchTypeFirstMatch,
+		})
+
+		err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey())
+
+		var chErr *checkercore.ChainRouterError
+		require.ErrorAs(t, err, &chErr)
+		require.Equal(t, chain.QuotaLimitReached, chErr.Status())
+	})
+
 	t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
 		los := inmemory.NewInmemoryLocalStorage()
 		mcs := inmemory.NewInmemoryMorphRuleChainStorage()
@@ -152,7 +191,7 @@ func TestCheckAPE(t *testing.T) {
 			MatchType: chain.MatchTypeFirstMatch,
 		})
 
-		err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
+		err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
 		require.NoError(t, err)
 	})
 
@@ -201,7 +240,7 @@ func TestCheckAPE(t *testing.T) {
 			MatchType: chain.MatchTypeFirstMatch,
 		})
 
-		err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
+		err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
 		require.NoError(t, err)
 	})
 }
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index ac80d0e4c..a11700771 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -10,12 +10,9 @@ import (
 	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
-	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
-	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
 	"github.com/hashicorp/golang-lru/v2/simplelru"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/credentials/insecure"
 )
 
 type clientCache struct {
@@ -51,7 +48,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
 
 func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
 	c.Lock()
-	ccInt, ok := c.LRU.Get(netmapAddr)
+	ccInt, ok := c.Get(netmapAddr)
 	c.Unlock()
 
 	if ok {
(TreeServiceCl } } - cc, err := c.dialTreeService(ctx, netmapAddr) + var netAddr network.Address + if err := netAddr.FromString(netmapAddr); err != nil { + return nil, err + } + + cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) lastTry := time.Now() c.Lock() if err != nil { - c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) } else { - c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) } c.Unlock() @@ -86,48 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl return NewTreeServiceClient(cc), nil } - -func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) { - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - opts := []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), - ), - grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), - tracing.NewStreamClientInterceptor(), - ), - grpc.WithContextDialer(c.ds.GrpcContextDialer()), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - } - - if !netAddr.IsTLSEnabled() { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, c.key); err != nil { - return nil, err - } - - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - defer cancel() - // perform some request to check connection - if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { - _ = cc.Close() - return nil, err - } - return cc, nil -} diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go index 0f0e4ee57..07503f8c3 100644 --- a/pkg/services/tree/metrics.go +++ b/pkg/services/tree/metrics.go @@ -6,6 +6,7 @@ type MetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type defaultMetricsRegister struct{} @@ -13,3 +14,4 @@ type defaultMetricsRegister struct{} func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {} +func (defaultMetricsRegister) AddOperation(string, string) {} diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 4ad760846..56cbcc081 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -3,6 +3,7 @@ package tree import ( "context" "crypto/ecdsa" + "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -41,7 +42,7 @@ type cfg struct { replicatorWorkerCount int replicatorTimeout time.Duration containerCacheSize int - authorizedKeys [][]byte + authorizedKeys atomic.Pointer[[][]byte] syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage @@ -147,10 +148,7 @@ func WithMetrics(v MetricsRegister) Option { // keys that have rights to use Tree service. 
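// The key list now lives behind an atomic.Pointer, so it can also be swapped
// at runtime without blocking readers. A minimal reload sketch (the helper
// name applyNewKeys is hypothetical; ReloadAuthorizedKeys is added to the
// Service in service.go below):
//
//	func applyNewKeys(svc *Service, parsed keys.PublicKeys) {
//		svc.ReloadAuthorizedKeys(parsed) // atomically replaces the whole list
//	}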
func WithAuthorizedKeys(keys keys.PublicKeys) Option { return func(c *cfg) { - c.authorizedKeys = nil - for _, key := range keys { - c.authorizedKeys = append(c.authorizedKeys, key.Bytes()) - } + c.authorizedKeys.Store(fromPublicKeys(keys)) } } diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go new file mode 100644 index 000000000..8f21686df --- /dev/null +++ b/pkg/services/tree/qos.go @@ -0,0 +1,101 @@ +package tree + +import ( + "context" + + "google.golang.org/grpc" +) + +var _ TreeServiceServer = (*ioTagAdjust)(nil) + +type AdjustIOTag interface { + AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context +} + +type ioTagAdjust struct { + s TreeServiceServer + a AdjustIOTag +} + +func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer { + return &ioTagAdjust{ + s: s, + a: a, + } +} + +func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Add(ctx, req) +} + +func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.AddByPath(ctx, req) +} + +func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Apply(ctx, req) +} + +func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.GetNodeByPath(ctx, req) +} + +func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) + return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{ + sender: srv, + ServerStream: srv, + ctxF: func() context.Context { return ctx }, + }) +} + +func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { + ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) + return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{ + sender: srv, + ServerStream: srv, + ctxF: func() context.Context { return ctx }, + }) +} + +func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Healthcheck(ctx, req) +} + +func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Move(ctx, req) +} + +func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Remove(ctx, req) +} + +func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.TreeList(ctx, req) +} + +type qosSend[T any] interface { + Send(T) error +} + +type qosServerWrapper[T any] struct { + grpc.ServerStream + sender qosSend[T] + ctxF func() context.Context +} + +func (w *qosServerWrapper[T]) Send(resp T) error { + return w.sender.Send(resp) +} + +func (w *qosServerWrapper[T]) Context() context.Context { + return w.ctxF() +} diff --git a/pkg/services/tree/redirect.go 
b/pkg/services/tree/redirect.go index d92c749a8..647f8cb30 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -19,8 +19,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request") func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { var resp *Resp var outErr error - err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = callback(c, ctx, req) + err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = callback(c, fCtx, req) return true }) if err != nil { @@ -31,7 +31,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. -func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { +func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { for _, n := range cntNodes { if bytes.Equal(n.PublicKey(), s.rawPub) { return nil @@ -41,24 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo var called bool for _, n := range cntNodes { var stop bool - n.IterateNetworkEndpoints(func(endpoint string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false + for endpoint := range n.NetworkEndpoints() { + stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { + called = true + return f(fCtx, c) + }) + if called { + break } - - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) - - called = true - stop = f(c) - return true - }) + } if stop { return nil } @@ -68,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo } return nil } + +func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + + c, err := s.cache.get(ctx, endpoint) + if err != nil { + return false + } + + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) + return f(ctx, c) +} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 164815c76..ee40884eb 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -89,29 +89,13 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req var lastErr error var lastAddr string - n.IterateNetworkEndpoints(func(addr string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - + for addr := range n.NetworkEndpoints() { lastAddr = addr - - c, err := s.cache.get(ctx, addr) - if 
err != nil { - lastErr = fmt.Errorf("can't create client: %w", err) - return false + lastErr = s.apply(ctx, n, addr, req) + if lastErr == nil { + break } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, lastErr = c.Apply(ctx, req) - cancel() - - return lastErr == nil - }) + } if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { @@ -130,6 +114,26 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req return nil } +func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + + c, err := s.cache.get(ctx, addr) + if err != nil { + return fmt.Errorf("can't create client: %w", err) + } + + ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) + _, err = c.Apply(ctx, req) + cancel() + return err +} + func (s *Service) replicateLoop(ctx context.Context) { for range s.replicatorWorkerCount { go s.replicationWorker(ctx) @@ -202,7 +206,7 @@ func newApplyRequest(op *movePair) *ApplyRequest { TreeId: op.treeID, Operation: &LogMove{ ParentId: op.op.Parent, - Meta: op.op.Meta.Bytes(), + Meta: op.op.Bytes(), ChildId: op.op.Child, }, }, diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 3c0214a98..3994d6973 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -9,12 +9,15 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -57,6 +60,7 @@ func New(opts ...Option) *Service { s.replicatorTimeout = defaultReplicatorSendTimeout s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} + s.authorizedKeys.Store(&[][]byte{}) for i := range opts { opts[i](&s.cfg) @@ -83,6 +87,7 @@ func New(opts ...Option) *Service { // Start starts the service. 
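// Both background loops started here run under a dedicated IO tag, so the QoS
// layer and the new AddOperation metric can separate internal tree traffic
// from client requests. The two sides of that contract, as introduced by this
// patch:
//
//	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) // producer: tag the context
//	defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx))  // consumer: record per-tag metrics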
func (s *Service) Start(ctx context.Context) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) @@ -102,6 +107,7 @@ func (s *Service) Shutdown() { } func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -113,7 +119,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -145,6 +151,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error } func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -156,7 +163,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -200,6 +207,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP } func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -211,7 +219,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) if err != nil { return nil, err } @@ -244,6 +252,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon // Move applies client operation to the specified tree and pushes in queue // for replication on other nodes. 
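// Note: arguments of a deferred call are evaluated when the defer statement
// itself executes, so each AddOperation above records the IO tag exactly as
// it was on handler entry, not as it may look when the handler returns.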
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -255,7 +264,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -287,6 +296,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er } func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -298,7 +308,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return nil, err } @@ -337,14 +347,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } else { var metaValue []KeyValue for _, kv := range m.Items { - for _, attr := range b.GetAttributes() { - if kv.Key == attr { - metaValue = append(metaValue, KeyValue{ - Key: kv.Key, - Value: kv.Value, - }) - break - } + if slices.Contains(b.GetAttributes(), kv.Key) { + metaValue = append(metaValue, KeyValue{ + Key: kv.Key, + Value: kv.Value, + }) } } x.Meta = metaValue @@ -360,6 +367,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { + defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -371,7 +379,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return err } @@ -383,8 +391,8 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS if pos < 0 { var cli TreeService_GetSubTreeClient var outErr error - err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(srv.Context(), req) + err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetSubTree(fCtx, req) return true }) if err != nil { @@ -406,7 +414,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS type stackItem struct { values []pilorama.MultiNodeInfo parent pilorama.MultiNode - last *string + last *pilorama.Cursor } func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { @@ -430,10 +438,8 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid } if ms == nil { ms = m.Items - } else { - if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") - } + } else if len(m.Items) != 1 { + return 
status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") } ts = append(ts, m.Time) ps = append(ps, p) @@ -457,14 +463,13 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid break } - nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) + var err error + item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) if err != nil { return err } - item.values = nodes - item.last = last - if len(nodes) == 0 { + if len(item.values) == 0 { stack = stack[:len(stack)-1] continue } @@ -587,6 +592,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di // Apply locally applies operation from the remote node to the tree. func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx)) err := verifyMessage(req) if err != nil { return nil, err @@ -630,6 +636,7 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, } func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -648,8 +655,8 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) if pos < 0 { var cli TreeService_GetOpLogClient var outErr error - err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(srv.Context(), req) + err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetOpLog(fCtx, req) return true }) if err != nil { @@ -680,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) Body: &GetOpLogResponse_Body{ Operation: &LogMove{ ParentId: lm.Parent, - Meta: lm.Meta.Bytes(), + Meta: lm.Bytes(), ChildId: lm.Child, }, }, @@ -694,6 +701,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) } func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -776,3 +784,15 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec return new(HealthcheckResponse), nil } + +func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { + s.authorizedKeys.Store(fromPublicKeys(newKeys)) +} + +func fromPublicKeys(keys keys.PublicKeys) *[][]byte { + buff := make([][]byte, len(keys)) + for i, k := range keys { + buff[i] = k.Bytes() + } + return &buff +} diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index d15438e81..8221a4546 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -38,7 +38,7 @@ var ( // Operation must be one of: // - 1. ObjectPut; // - 2. ObjectGet. 
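// Because verifyClient now forwards the tree ID into checkAPE, a rule can be
// scoped to a single tree. A sketch of such a rule (identifiers as used in
// ape_test.go; cnrID stands for an encoded container ID):
//
//	chain.Rule{
//		Status:  chain.AccessDenied,
//		Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}},
//		Resources: chain.Resources{
//			Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID)},
//		},
//		Condition: []chain.Condition{{
//			Op:    chain.CondStringEquals,
//			Kind:  chain.KindResource,
//			Key:   nativeschema.ProperyKeyTreeID,
//			Value: "version",
//		}},
//	}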
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error { +func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { err := verifyMessage(req) if err != nil { return err @@ -64,7 +64,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - if err = s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey); err != nil { + if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { return apeErr(err) } return nil @@ -95,8 +95,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { } key := sign.GetKey() - for i := range s.authorizedKeys { - if bytes.Equal(s.authorizedKeys[i], key) { + for _, currentKey := range *s.authorizedKeys.Load() { + if bytes.Equal(currentKey, key) { return true, nil } } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 97f8a727a..8815c227f 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -31,6 +31,8 @@ import ( "github.com/stretchr/testify/require" ) +const versionTreeID = "version" + type dummyNetmapSource struct { netmap.Source } @@ -150,6 +152,7 @@ func TestMessageSign(t *testing.T) { apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } + s.cfg.authorizedKeys.Store(&[][]byte{}) rawCID1 := make([]byte, sha256.Size) cid1.Encode(rawCID1) @@ -168,26 +171,26 @@ func TestMessageSign(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRW) t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) cnr.Value.SetBasicACL(acl.Private) t.Run("extension disabled", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) t.Run("invalid key", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) }) t.Run("bearer", func(t *testing.T) { @@ -200,7 +203,7 @@ func TestMessageSign(t *testing.T) { t.Run("invalid bearer", func(t *testing.T) { req.Body.BearerToken = []byte{0xFF} require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer CID", func(t *testing.T) { @@ -209,7 +212,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = 
bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer owner", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -217,7 +220,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer signature", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -229,20 +232,112 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bv2.StableMarshal(nil) require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + + t.Run("omit override within bt", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + }) + + t.Run("invalid override within bearer token", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) }) t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: 
[]ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate, but target user is still set", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) bt.SetImpersonate(true) + var reqSigner user.ID + user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey())) + + bt.ForUser(reqSigner) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate but invalid signer", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) require.NoError(t, bt.Sign(privs[1].PrivateKey)) req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -252,18 +347,18 @@ func TestMessageSign(t *testing.T) { t.Run("put and get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("only get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, 
req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("none", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) }) } @@ -282,6 +377,25 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token return b } +func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + b.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + }, + Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, + }) + + return b +} + +func testBearerTokenNoOverride() bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + return b +} + func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { ruleGet := chain.Rule{ Status: chain.Allow, diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 9b177d6b6..af355639f 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -2,7 +2,9 @@ package tree import ( "context" + "crypto/ecdsa" "crypto/sha256" + "crypto/tls" "errors" "fmt" "io" @@ -13,6 +15,8 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -20,12 +24,15 @@ import ( metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -71,8 +78,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { var treesToSync []string var outErr error - err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool { - resp, outErr = c.TreeList(ctx, req) + err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = c.TreeList(fCtx, req) if outErr != nil { return false } @@ -240,7 +247,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, Parent: lm.GetParentId(), Child: lm.GetChildId(), } - if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { + if err := m.FromBytes(lm.GetMeta()); err != nil { return err } select { @@ -292,27 +299,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, for i, n := range nodes { 
errGroup.Go(func() error { var nodeSynced bool - n.IterateNetworkEndpoints(func(addr string) bool { + for addr := range n.NetworkEndpoints() { var a network.Address if err := a.FromString(addr); err != nil { s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + continue } - cc, err := s.createConnection(a) + cc, err := dialTreeService(ctx, a, s.key, s.ds) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + continue } - defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil - return true - }) + _ = cc.Close() + break + } close(nodeOperationStreams[i]) if !nodeSynced { allNodesSynced.Store(false) @@ -337,19 +344,60 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { - return grpc.NewClient(a.URIAddr(), +func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { + cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) + defer cancel() + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, key); err != nil { + return nil, err + } + + // perform some request to check connection + if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { + _ = cc.Close() + return nil, err + } + return cc, nil +} + +func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + host, isTLS, err := client.ParseURI(a.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + + defaultOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInteceptor(), + tracing_grpc.NewUnaryClientInterceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithTransportCredentials(creds), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - ) + grpc.WithDisableServiceConfig(), + } + + return grpc.NewClient(host, append(defaultOpts, opts...)...) 
} // ErrAlreadySyncing is returned when a service synchronization has already @@ -393,7 +441,7 @@ func (s *Service) syncLoop(ctx context.Context) { start := time.Now() - cnrs, err := s.cfg.cnrSource.List(ctx) + cnrs, err := s.cnrSource.List(ctx) if err != nil { s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go index a34a17f6f..6f114d45b 100644 --- a/pkg/util/ape/parser.go +++ b/pkg/util/ape/parser.go @@ -174,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) { case "deny": if !found { return apechain.AccessDenied, nil - } else if strings.EqualFold(expression, "QuotaLimitReached") { - return apechain.QuotaLimitReached, nil - } else { - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) } + if strings.EqualFold(expression, "QuotaLimitReached") { + return apechain.QuotaLimitReached, nil + } + return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) case "allow": if found { return 0, errUnknownStatusDetail diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go index 547c8d50b..66581878a 100644 --- a/pkg/util/attributes/parser_test.go +++ b/pkg/util/attributes/parser_test.go @@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) { mExp = mSrc } - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { v, ok := mExp[key] require.True(t, ok) require.Equal(t, value, v) delete(mExp, key) - }) + } require.Empty(t, mExp) } diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go index 923412a7f..2589ab786 100644 --- a/pkg/util/http/server.go +++ b/pkg/util/http/server.go @@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server { o(c) } - switch { - case c.shutdownTimeout <= 0: + if c.shutdownTimeout <= 0 { panicOnOptValue("shutdown timeout", c.shutdownTimeout) } diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go index b2942b52a..6337039a9 100644 --- a/pkg/util/keyer/dashboard.go +++ b/pkg/util/keyer/dashboard.go @@ -6,6 +6,7 @@ import ( "os" "text/tabwriter" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -104,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { func base58ToHex(data string) string { val, err := base58.Decode(data) - if err != nil { - panic("produced incorrect base58 value") - } + assert.NoError(err, "produced incorrect base58 value") return hex.EncodeToString(val) } diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go index 269e07d90..413b1d9aa 100644 --- a/pkg/util/logger/log.go +++ b/pkg/util/logger/log.go @@ -4,37 +4,32 @@ import ( "context" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" ) func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Debug(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Debug(msg, fields...) + l.z.Debug(msg, appendContext(ctx, fields...)...) } func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Info(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Info(msg, fields...) 
+ l.z.Info(msg, appendContext(ctx, fields...)...) } func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Warn(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Warn(msg, fields...) + l.z.Warn(msg, appendContext(ctx, fields...)...) } func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Error(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Error(msg, fields...) + l.z.Error(msg, appendContext(ctx, fields...)...) +} + +func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + fields = append(fields, zap.String("trace_id", traceID)) + } + if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined { + fields = append(fields, zap.String("io_tag", ioTag)) + } + return fields } diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 19d3f1ed1..a1998cb1a 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -2,6 +2,7 @@ package logger import ( "fmt" + "time" "git.frostfs.info/TrueCloudLab/zapjournald" "github.com/ssgreg/journald" @@ -12,8 +13,10 @@ import ( // Logger represents a component // for writing messages to log. type Logger struct { - z *zap.Logger - lvl zap.AtomicLevel + z *zap.Logger + c zapcore.Core + t Tag + w bool } // Prm groups Logger's parameters. @@ -22,16 +25,8 @@ type Logger struct { // Parameters that have been connected to the Logger support its // configuration changing. // -// Passing Prm after a successful connection via the NewLogger, connects -// the Prm to a new instance of the Logger. -// -// See also Reload, SetLevelString. +// See also Logger.Reload, SetLevelString. type Prm struct { - // link to the created Logger - // instance; used for a runtime - // reconfiguration - _log *Logger - // support runtime rereading level zapcore.Level @@ -43,6 +38,12 @@ type Prm struct { // PrependTimestamp specifies whether to prepend a timestamp in the log PrependTimestamp bool + + // Options for zap.Logger + Options []zap.Option + + // map of tag's bit masks to log level, overrides lvl + tl map[Tag]zapcore.Level } const ( @@ -72,20 +73,10 @@ func (p *Prm) SetDestination(d string) error { return nil } -// Reload reloads configuration of a connected instance of the Logger. -// Returns ErrLoggerNotConnected if no connection has been performed. -// Returns any reconfiguration error from the Logger directly. -func (p Prm) Reload() error { - if p._log == nil { - // incorrect logger usage - panic("parameters are not connected to any Logger") - } - - return p._log.reload(p) -} - -func defaultPrm() *Prm { - return new(Prm) +// SetTags parses list of tags with log level. +func (p *Prm) SetTags(tags [][]string) (err error) { + p.tl, err = parseTags(tags) + return err } // NewLogger constructs a new zap logger instance. Constructing with nil @@ -99,10 +90,7 @@ func defaultPrm() *Prm { // - ISO8601 time encoding. // // Logger records a stack trace for all messages at or above fatal level. 
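// Typical construction with per-tag levels (mirrors logger_test.go): the base
// level is the default for every tag, SetTags overrides it selectively, and
// UpdateLevelForTags pushes the result into the shared atomic levels:
//
//	var prm Prm
//	_ = prm.SetLevelString("error")
//	_ = prm.SetTags([][]string{{"morph", "debug"}})
//	l, _ := NewLogger(prm)
//	UpdateLevelForTags(prm)
//	morphLog := l.WithTag(TagMorph) // logs at debug; other tags stay at error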
-func NewLogger(prm *Prm) (*Logger, error) { - if prm == nil { - prm = defaultPrm() - } +func NewLogger(prm Prm) (*Logger, error) { switch prm.dest { case DestinationUndefined, DestinationStdout: return newConsoleLogger(prm) @@ -113,11 +101,9 @@ func NewLogger(prm *Prm) (*Logger, error) { } } -func newConsoleLogger(prm *Prm) (*Logger, error) { - lvl := zap.NewAtomicLevelAt(prm.level) - +func newConsoleLogger(prm Prm) (*Logger, error) { c := zap.NewProductionConfig() - c.Level = lvl + c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -129,26 +115,23 @@ func newConsoleLogger(prm *Prm) (*Logger, error) { c.EncoderConfig.TimeKey = "" } - lZap, err := c.Build( + opts := []zap.Option{ zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1), - ) + } + opts = append(opts, prm.Options...) + lZap, err := c.Build(opts...) if err != nil { return nil, err } - - l := &Logger{z: lZap, lvl: lvl} - prm._log = l + l := &Logger{z: lZap, c: lZap.Core()} + l = l.WithTag(TagMain) return l, nil } -func newJournaldLogger(prm *Prm) (*Logger, error) { - lvl := zap.NewAtomicLevelAt(prm.level) - +func newJournaldLogger(prm Prm) (*Logger, error) { c := zap.NewProductionConfig() - c.Level = lvl - c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook } @@ -161,36 +144,100 @@ encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields) - core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields) + core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields) coreWithContext := core.With([]zapcore.Field{ zapjournald.SyslogFacility(zapjournald.LogDaemon), zapjournald.SyslogIdentifier(), zapjournald.SyslogPid(), }) - lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) - - l := &Logger{z: lZap, lvl: lvl} - prm._log = l + var samplerOpts []zapcore.SamplerOption + if c.Sampling.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook)) + } + samplingCore := zapcore.NewSamplerWithOptions( + coreWithContext, + time.Second, + c.Sampling.Initial, + c.Sampling.Thereafter, + samplerOpts..., + ) + opts := []zap.Option{ + zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), + zap.AddCallerSkip(1), + } + opts = append(opts, prm.Options...) + lZap := zap.New(samplingCore, opts...) + l := &Logger{z: lZap, c: lZap.Core()} + l = l.WithTag(TagMain) return l, nil } -func (l *Logger) reload(prm Prm) error { - l.lvl.SetLevel(prm.level) - return nil -} - -func (l *Logger) WithOptions(options ...zap.Option) { - l.z = l.z.WithOptions(options...) -} - +// With creates a child logger with new fields and does not affect the parent. +// Panics if the tag is unset. func (l *Logger) With(fields ...zap.Field) *Logger { - return &Logger{z: l.z.With(fields...)} + if l.t == 0 { + panic("tag is unset") + } + c := *l + c.z = l.z.With(fields...)
+ // remember that With was called on this logger; WithTag is not allowed after that c.w = true + return &c +} + +type core struct { + c zapcore.Core + l zap.AtomicLevel +} + +func (c *core) Enabled(lvl zapcore.Level) bool { + return c.l.Enabled(lvl) +} + +func (c *core) With(fields []zapcore.Field) zapcore.Core { + clone := *c + clone.c = clone.c.With(fields) + return &clone +} + +func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + return c.c.Check(e, ce) +} + +func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error { + return c.c.Write(e, fields) +} + +func (c *core) Sync() error { + return c.c.Sync() +} + +// WithTag is equivalent to calling [NewLogger] with the same parameters for the current logger. +// Panics if an unsupported tag is provided. +func (l *Logger) WithTag(tag Tag) *Logger { + if tag == 0 || tag > Tag(len(_Tag_index)-1) { + panic("unsupported tag " + tag.String()) + } + if l.w { + panic("unsupported operation for the logger's state") + } + c := *l + c.t = tag + c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { + return &core{ + c: l.c.With([]zap.Field{zap.String("tag", tag.String())}), + l: tagToLogLevel[tag], + } + })) + return &c } func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ z: z.WithOptions(zap.AddCallerSkip(1)), + t: TagMain, + c: z.Core(), } } diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go new file mode 100644 index 000000000..b867ee6cc --- /dev/null +++ b/pkg/util/logger/logger_test.go @@ -0,0 +1,118 @@ +package logger + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +func BenchmarkLogger(b *testing.B) { + ctx := context.Background() + m := map[string]Prm{} + + prm := Prm{} + require.NoError(b, prm.SetLevelString("debug")) + m["logging enabled"] = prm + + prm = Prm{} + require.NoError(b, prm.SetLevelString("error")) + m["logging disabled"] = prm + + prm = Prm{} + require.NoError(b, prm.SetLevelString("error")) + require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}})) + m["logging enabled via tags"] = prm + + prm = Prm{} + require.NoError(b, prm.SetLevelString("debug")) + require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}})) + m["logging disabled via tags"] = prm + + for k, v := range m { + b.Run(k, func(b *testing.B) { + logger, err := createLogger(v) + require.NoError(b, err) + UpdateLevelForTags(v) + b.ResetTimer() + b.ReportAllocs() + for range b.N { + logger.Info(ctx, "test info") + } + }) + } +} + +type testCore struct { + core zapcore.Core +} + +func (c *testCore) Enabled(lvl zapcore.Level) bool { + return c.core.Enabled(lvl) +} + +func (c *testCore) With(fields []zapcore.Field) zapcore.Core { + c.core = c.core.With(fields) + return c +} + +func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + return ce.AddCore(e, c) +} + +func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error { + return nil +} + +func (c *testCore) Sync() error { + return c.core.Sync() +} + +func createLogger(prm Prm) (*Logger, error) { + prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { + tc := testCore{core: core} + return &tc + })} + return NewLogger(prm) +} + +func TestLoggerOutput(t *testing.T) { + obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel)) + + prm := Prm{} + require.NoError(t, prm.SetLevelString("debug")) + prm.Options =
[]zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core { + return obs + })} + loggerMain, err := NewLogger(prm) + require.NoError(t, err) + UpdateLevelForTags(prm) + + loggerMainWith := loggerMain.With(zap.String("key", "value")) + + require.Panics(t, func() { + loggerMainWith.WithTag(TagShard) + }) + loggerShard := loggerMain.WithTag(TagShard) + loggerShard = loggerShard.With(zap.String("key1", "value1")) + + loggerMorph := loggerMain.WithTag(TagMorph) + loggerMorph = loggerMorph.With(zap.String("key2", "value2")) + + ctx := context.Background() + loggerMain.Debug(ctx, "main") + loggerMainWith.Debug(ctx, "main with") + loggerShard.Debug(ctx, "shard") + loggerMorph.Debug(ctx, "morph") + + require.Len(t, logs.All(), 4) + require.Len(t, logs.FilterFieldKey("key").All(), 1) + require.Len(t, logs.FilterFieldKey("key1").All(), 1) + require.Len(t, logs.FilterFieldKey("key2").All(), 1) + require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2) + require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1) + require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1) +} diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result new file mode 100644 index 000000000..612fa2967 --- /dev/null +++ b/pkg/util/logger/logger_test.result @@ -0,0 +1,46 @@ +goos: linux +goarch: amd64 +pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger +cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz +BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op 
+BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op +PASS +ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go new file mode 100644 index 000000000..1b98f2e62 --- /dev/null +++ b/pkg/util/logger/tag_string.go @@ -0,0 +1,43 @@ +// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT. + +package logger + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TagMain-1] + _ = x[TagMorph-2] + _ = x[TagGrpcSvc-3] + _ = x[TagIr-4] + _ = x[TagProcessor-5] + _ = x[TagEngine-6] + _ = x[TagBlobovnicza-7] + _ = x[TagBlobovniczaTree-8] + _ = x[TagBlobstor-9] + _ = x[TagFSTree-10] + _ = x[TagGC-11] + _ = x[TagShard-12] + _ = x[TagWriteCache-13] + _ = x[TagDeleteSvc-14] + _ = x[TagGetSvc-15] + _ = x[TagSearchSvc-16] + _ = x[TagSessionSvc-17] + _ = x[TagTreeSvc-18] + _ = x[TagPolicer-19] + _ = x[TagReplicator-20] +} + +const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator" + +var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148} + +func (i Tag) String() string { + i -= 1 + if i >= Tag(len(_Tag_index)-1) { + return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _Tag_name[_Tag_index[i]:_Tag_index[i+1]] +} diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go new file mode 100644 index 000000000..a5386707e --- /dev/null +++ b/pkg/util/logger/tags.go @@ -0,0 +1,94 @@ +package logger + +import ( + "fmt" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +//go:generate stringer -type Tag -linecomment + +type Tag uint8 + +const ( + _ Tag = iota // + TagMain // main + TagMorph // morph + TagGrpcSvc // grpcsvc + TagIr // ir + TagProcessor // processor + TagEngine // engine + TagBlobovnicza // blobovnicza + TagBlobovniczaTree // blobovniczatree + TagBlobstor // blobstor + TagFSTree // fstree + TagGC // gc + TagShard // shard + TagWriteCache // writecache + TagDeleteSvc // deletesvc + TagGetSvc // getsvc + TagSearchSvc // searchsvc + TagSessionSvc // sessionsvc + TagTreeSvc // treesvc + TagPolicer // policer + TagReplicator // replicator + + defaultLevel = zapcore.InfoLevel +) + +var ( + tagToLogLevel = map[Tag]zap.AtomicLevel{} + stringToTag = 
map[string]Tag{} +) + +func init() { + for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ { + tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel) + stringToTag[i.String()] = i + } +} + +// parseTags returns: +// - a map (always instantiated) from tag to the custom log level for that tag; +// - an error if parsing failed (the returned map is nil in that case). +func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) { + m := make(map[Tag]zapcore.Level) + if len(raw) == 0 { + return m, nil + } + for _, item := range raw { + str, level := item[0], item[1] + if len(level) == 0 { + // Tags without an explicit level are skipped: + // the default log level will be used for them. + continue + } + var l zapcore.Level + err := l.UnmarshalText([]byte(level)) + if err != nil { + return nil, err + } + tmp := strings.Split(str, ",") + for _, tagStr := range tmp { + tag, ok := stringToTag[strings.TrimSpace(tagStr)] + if !ok { + return nil, fmt.Errorf("unsupported tag %s", tagStr) + } + m[tag] = l + } + } + return m, nil +} + +func UpdateLevelForTags(prm Prm) { + for k, v := range tagToLogLevel { + nk, ok := prm.tl[k] + if ok { + v.SetLevel(nk) + } else { + v.SetLevel(prm.level) + } + } +} diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go new file mode 100644 index 000000000..7373e538f --- /dev/null +++ b/pkg/util/testing/netmap_source.go @@ -0,0 +1,36 @@ +package testing + +import ( + "context" + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +var ( + errInvalidDiff = errors.New("invalid diff") + errNetmapNotFound = errors.New("netmap not found") +) + +type TestNetmapSource struct { + Netmaps map[uint64]*netmap.NetMap + CurrentEpoch uint64 +} + +func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { + if diff >= s.CurrentEpoch { + return nil, errInvalidDiff + } + return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff) +} + +func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) { + if nm, found := s.Netmaps[epoch]; found { + return nm, nil + } + return nil, errNetmapNotFound +} + +func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) { + return s.CurrentEpoch, nil +} diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go index f2f8881cf..39a420358 100644 --- a/scripts/populate-metabase/internal/generate.go +++ b/scripts/populate-metabase/internal/generate.go @@ -1,8 +1,10 @@ package internal import ( + cryptorand "crypto/rand" "crypto/sha256" "fmt" + "math/rand" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -14,14 +16,13 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" - "golang.org/x/exp/rand" ) func GeneratePayloadPool(count uint, size uint) [][]byte { var pool [][]byte for range count { payload := make([]byte, size) - _, _ = rand.Read(payload) + _, _ = cryptorand.Read(payload) pool = append(pool, payload) } diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go index 4da23a295..fafe61eaa 100644 --- a/scripts/populate-metabase/internal/populate.go +++ b/scripts/populate-metabase/internal/populate.go @@ -31,13 +31,10 @@ func PopulateWithObjects( for range count { obj := factory() - - id := []byte(fmt.Sprintf( - "%c/%c/%c", + id := fmt.Appendf(nil, "%c/%c/%c",
digits[rand.Int()%len(digits)], digits[rand.Int()%len(digits)], - digits[rand.Int()%len(digits)], - )) + digits[rand.Int()%len(digits)]) prm := meta.PutPrm{} prm.SetObject(obj)
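Note on pkg/util/logger/tags.go: a minimal sketch of how the pieces above are meant to compose, written as if inside package logger (parseTags and the Prm fields tl/level are unexported). The raw [][]string input and the direct field assignments stand in for config plumbing that is not part of this diff; Prm may require additional setup not shown here.

func ExampleTagLevels() {
	// Each entry pairs a comma-separated tag list with a single level.
	raw := [][]string{
		{"shard,writecache", "debug"},
		{"morph", "error"},
	}
	levels, err := parseTags(raw)
	if err != nil {
		panic(err)
	}

	var prm Prm
	prm.level = zapcore.InfoLevel // fallback for tags not listed in raw
	prm.tl = levels

	log, err := NewLogger(prm)
	if err != nil {
		panic(err)
	}
	UpdateLevelForTags(prm) // push per-tag levels into the shared atomic levels

	shardLog := log.WithTag(TagShard) // logs at debug
	morphLog := log.WithTag(TagMorph) // logs at error
	_, _ = shardLog, morphLog
}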
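Note on pkg/util/testing/netmap_source.go: GetNetMap resolves a diff relative to CurrentEpoch (epoch = CurrentEpoch - diff) and rejects diff >= CurrentEpoch. A usage sketch follows; the package name, import alias, and test name are illustrative only.

package example

import (
	"context"
	"testing"

	utiltesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"github.com/stretchr/testify/require"
)

func TestNetmapSourceSketch(t *testing.T) {
	src := &utiltesting.TestNetmapSource{
		Netmaps:      map[uint64]*netmap.NetMap{5: {}},
		CurrentEpoch: 6,
	}

	// diff counts back from the current epoch: 6 - 1 = 5.
	nm, err := src.GetNetMap(context.Background(), 1)
	require.NoError(t, err)
	require.NotNil(t, nm)

	// diff >= CurrentEpoch cannot address a valid epoch.
	_, err = src.GetNetMap(context.Background(), 6)
	require.Error(t, err)

	// Epochs with no stored netmap report "netmap not found".
	_, err = src.GetNetMapByEpoch(context.Background(), 3)
	require.Error(t, err)
}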