diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile deleted file mode 100644 index 4234de160..000000000 --- a/.ci/Jenkinsfile +++ /dev/null @@ -1,81 +0,0 @@ -def golang = ['1.23', '1.24'] -def golangDefault = "golang:${golang.last()}" - -async { - - for (version in golang) { - def go = version - - task("test/go${go}") { - container("golang:${go}") { - sh 'make test' - } - } - - task("build/go${go}") { - container("golang:${go}") { - for (app in ['cli', 'node', 'ir', 'adm', 'lens']) { - sh """ - make bin/frostfs-${app} - bin/frostfs-${app} --version - """ - } - } - } - } - - task('test/race') { - container(golangDefault) { - sh 'make test GOFLAGS="-count=1 -race"' - } - } - - task('lint') { - container(golangDefault) { - sh 'make lint-install lint' - } - } - - task('staticcheck') { - container(golangDefault) { - sh 'make staticcheck-install staticcheck-run' - } - } - - task('gopls') { - container(golangDefault) { - sh 'make gopls-install gopls-run' - } - } - - task('gofumpt') { - container(golangDefault) { - sh ''' - make fumpt-install - make fumpt - git diff --exit-code --quiet - ''' - } - } - - task('vulncheck') { - container(golangDefault) { - sh ''' - go install golang.org/x/vuln/cmd/govulncheck@latest - govulncheck ./... - ''' - } - } - - task('pre-commit') { - dockerfile(""" - FROM ${golangDefault} - RUN apt update && \ - apt install -y --no-install-recommends pre-commit - """) { - withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { - sh 'pre-commit run --color=always --hook-stage=manual --all-files' - } - } - } -} diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml index d568b9607..9129d136e 100644 --- a/.forgejo/workflows/build.yml +++ b/.forgejo/workflows/build.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.23', '1.24' ] + go_versions: [ '1.22', '1.23' ] steps: - uses: actions/checkout@v3 diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml index 190d7764a..7c5af8410 100644 --- a/.forgejo/workflows/dco.yml +++ b/.forgejo/workflows/dco.yml @@ -13,7 +13,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.24' + go-version: '1.22' - name: Run commit format checker uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml deleted file mode 100644 index fe91d65f9..000000000 --- a/.forgejo/workflows/oci-image.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: OCI image - -on: - push: - workflow_dispatch: - -jobs: - image: - name: Build container images - runs-on: docker - container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm - steps: - - name: Clone git repo - uses: actions/checkout@v3 - - - name: Build OCI image - run: make images - - - name: Push image to OCI registry - run: | - echo "$REGISTRY_PASSWORD" \ - | docker login --username truecloudlab --password-stdin git.frostfs.info - make push-images - if: >- - startsWith(github.ref, 'refs/tags/v') && - (github.event_name == 'workflow_dispatch' || github.event_name == 'push') - env: - REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}} diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml index c2e293175..b27e7a39a 100644 --- a/.forgejo/workflows/pre-commit.yml +++ b/.forgejo/workflows/pre-commit.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.24 + go-version: 1.23 - name: Set up Python run: | apt update diff --git a/.forgejo/workflows/tests.yml 
b/.forgejo/workflows/tests.yml index f3f5432ce..4f1bebe61 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.24' + go-version: '1.23' cache: true - name: Install linters @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.23', '1.24' ] + go_versions: [ '1.22', '1.23' ] fail-fast: false steps: - uses: actions/checkout@v3 @@ -53,7 +53,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.24' + go-version: '1.22' cache: true - name: Run tests @@ -68,7 +68,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.24' + go-version: '1.23' cache: true - name: Install staticcheck @@ -104,7 +104,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.24' + go-version: '1.23' cache: true - name: Install gofumpt diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index bc94792d8..cf15005b1 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,8 +18,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.24' - check-latest: true + go-version: '1.23' - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.golangci.yml b/.golangci.yml index e3ec09f60..57e3b4494 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,107 +1,93 @@ -version: "2" +# This file contains all available configuration options +# with their default values. + +# options for analysis running run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 20m + + # include test files or not, default is true tests: false + +# output configuration options output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" formats: - tab: - path: stdout - colors: false + - format: tab + +# all available settings of specific linters +linters-settings: + exhaustive: + # indicates that switch statements are to be considered exhaustive if a + # 'default' case is present, even if all enum members aren't listed in the + # switch + default-signifies-exhaustive: true + govet: + # report about shadowed variables + check-shadowing: false + staticcheck: + checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. 
+ funlen: + lines: 80 # default 60 + statements: 60 # default 40 + gocognit: + min-complexity: 40 # default 30 + importas: + no-unaliased: true + no-extra-aliases: false + alias: + pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object + alias: objectSDK + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false + custom: + truecloudlab-linters: + path: bin/linters/external_linters.so + original-url: git.frostfs.info/TrueCloudLab/linters.git + settings: + noliteral: + target-methods : ["reportFlushError", "reportError"] + disable-packages: ["codes", "err", "res","exec"] + constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + linters: - default: none enable: - - bidichk - - containedctx - - contextcheck - - copyloopvar - - durationcheck - - errcheck - - exhaustive - - funlen - - gocognit - - gocritic - - godot - - importas - - ineffassign - - intrange - - misspell - - perfsprint - - predeclared - - protogetter - - reassign + # mandatory linters + - govet - revive + + # some default golangci-lint linters + - errcheck + - gosimple + - godot + - ineffassign - staticcheck - - testifylint - - truecloudlab-linters - - unconvert - - unparam + - typecheck - unused - - usetesting - - whitespace - settings: - exhaustive: - default-signifies-exhaustive: true - funlen: - lines: 80 - statements: 60 - gocognit: - min-complexity: 40 - gocritic: - disabled-checks: - - ifElseChain - importas: - alias: - - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - no-unaliased: true - no-extra-aliases: false - staticcheck: - checks: - - all - - -QF1002 - unused: - field-writes-are-uses: false - exported-fields-are-used: false - local-variables-are-used: false - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs - disable-packages: - - codes - - err - - res - - exec - target-methods: - - reportFlushError - - reportError - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: - - gci + + # extra linters + - bidichk + - durationcheck + - exhaustive + - copyloopvar - gofmt - goimports - settings: - gci: - sections: - - standard - - default - custom-order: true - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ + - misspell + - predeclared + - reassign + - whitespace + - containedctx + - funlen + - gocognit + - contextcheck + - importas + - truecloudlab-linters + - perfsprint + - testifylint + - protogetter + - intrange + - tenv + disable-all: true + fast: false diff --git a/Makefile b/Makefile index 575eaae6f..f0cdc273c 100755 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ #!/usr/bin/make -f SHELL = bash -.SHELLFLAGS = -euo pipefail -c REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") @@ -8,16 +7,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" -GO_VERSION ?= 1.23 -LINT_VERSION ?= 2.0.2 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 +GO_VERSION ?= 1.22 +LINT_VERSION ?= 1.62.0 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 PROTOC_VERSION ?= 
25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 ifeq ($(shell uname), Linux) PROTOC_OS_VERSION=linux-x86_64 endif -STATICCHECK_VERSION ?= 2025.1.1 +STATICCHECK_VERSION ?= 2024.1.1 ARCH = amd64 BIN = bin @@ -43,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) -GOPLS_VERSION ?= v0.17.1 +GOPLS_VERSION ?= v0.15.1 GOPLS_DIR ?= $(abspath $(BIN))/gopls GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION) GOPLS_TEMP_FILE := $(shell mktemp) @@ -116,7 +115,7 @@ protoc: # Install protoc protoc-install: @rm -rf $(PROTOBUF_DIR) - @mkdir -p $(PROTOBUF_DIR) + @mkdir $(PROTOBUF_DIR) @echo "⇒ Installing protoc... " @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @@ -140,15 +139,6 @@ images: image-storage image-ir image-cli image-adm # Build dirty local Docker images dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm -# Push FrostFS components' docker image to the registry -push-image-%: - @echo "⇒ Publish FrostFS $* docker image " - @docker push $(HUB_IMAGE)-$*:$(HUB_TAG) - -# Push all Docker images to the registry -.PHONY: push-images -push-images: push-image-storage push-image-ir push-image-cli push-image-adm - # Run `make %` in Golang container docker/%: docker run --rm -t \ @@ -170,7 +160,7 @@ imports: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir -p $(GOFUMPT_DIR) + @mkdir $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt @@ -187,44 +177,21 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... 
-# Install Gerrit commit-msg hook -review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks -review-install: - @git config remote.review.url \ - || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node - @mkdir -p $(GIT_HOOK_DIR)/ - @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg - @chmod +x $(GIT_HOOK_DIR)/commit-msg - @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg - @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg - -# Create a PR in Gerrit -review: BRANCH ?= master -review: - @git push review HEAD:refs/for/$(BRANCH) \ - --push-option r=e.stratonikov@yadro.com \ - --push-option r=d.stepanov@yadro.com \ - --push-option r=an.nikiforov@yadro.com \ - --push-option r=a.arifullin@yadro.com \ - --push-option r=ekaterina.lebedeva@yadro.com \ - --push-option r=a.savchuk@yadro.com \ - --push-option r=a.chuprov@yadro.com - # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: $(BIN) +lint-install: @rm -rf $(OUTPUT_LINT_DIR) - @mkdir -p $(OUTPUT_LINT_DIR) + @mkdir $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: @@ -236,7 +203,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir -p $(STATICCHECK_DIR) + @mkdir $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -249,7 +216,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir -p $(GOPLS_DIR) + @mkdir $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index f194e97f5..87692d013 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -16,16 +16,9 @@ const ( EndpointFlagDesc = "N3 RPC node endpoint" EndpointFlagShort = "r" - WalletPath = "wallet" - WalletPathShorthand = "w" - WalletPathUsage = "Path to the wallet" - AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" - AdminWalletPath = "wallet-admin" - AdminWalletUsage = "Path to the admin wallet" - LocalDumpFlag = "local-dump" ProtoConfigPath = "protocol" ContractsInitFlag = "contracts" diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go deleted file mode 100644 index d67b70d2a..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/root.go +++ /dev/null @@ -1,15 +0,0 @@ -package maintenance - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" - "github.com/spf13/cobra" -) - -var RootCmd = &cobra.Command{ - Use: "maintenance", - Short: "Section for maintenance commands", -} - -func init() { - 
RootCmd.AddCommand(zombie.Cmd) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go deleted file mode 100644 index 1b66889aa..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go +++ /dev/null @@ -1,70 +0,0 @@ -package zombie - -import ( - "crypto/ecdsa" - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { - keyDesc := viper.GetString(walletFlag) - if keyDesc == "" { - return &nodeconfig.Key(appCfg).PrivateKey - } - data, err := os.ReadFile(keyDesc) - commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) - - priv, err := keys.NewPrivateKeyFromBytes(data) - if err != nil { - w, err := wallet.NewWalletFromFile(keyDesc) - commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) - return fromWallet(cmd, w, viper.GetString(addressFlag)) - } - return &priv.PrivateKey -} - -func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { - var ( - addr util.Uint160 - err error - ) - - if addrStr == "" { - addr = w.GetChangeAddress() - } else { - addr, err = flags.ParseAddress(addrStr) - commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) - } - - acc := w.GetAccount(addr) - if acc == nil { - commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) - } - - pass, err := getPassword() - commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) - - commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) - - return &acc.PrivateKey().PrivateKey -} - -func getPassword() (string, error) { - // this check allows empty passwords - if viper.IsSet("password") { - return viper.GetString("password"), nil - } - - return input.ReadPassword("Enter password > ") -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go deleted file mode 100644 index f73f33db9..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go +++ /dev/null @@ -1,31 +0,0 @@ -package zombie - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func list(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - var containerID *cid.ID - if cidStr, _ := 
cmd.Flags().GetString(cidFlag); cidStr != "" { - containerID = &cid.ID{} - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - } - - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { - if containerID != nil && a.Container() != *containerID { - return nil - } - cmd.Println(a.EncodeToString()) - return nil - })) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go deleted file mode 100644 index cd3a64499..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go +++ /dev/null @@ -1,46 +0,0 @@ -package zombie - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/spf13/cobra" -) - -func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { - addresses := morphconfig.RPCEndpoint(appCfg) - if len(addresses) == 0 { - commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) - } - key := nodeconfig.Key(appCfg) - cli, err := client.New(cmd.Context(), - key, - client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), - client.WithEndpoints(addresses...), - client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), - ) - commonCmd.ExitOnErr(cmd, "create morph client: %w", err) - return cli -} - -func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { - hs, err := morph.NNSContractAddress(client.NNSContainerContractName) - commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err) - cc, err := cntClient.NewFromMorph(morph, hs, 0) - commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) - return cc -} - -func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { - hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) - commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) - cli, err := netmapClient.NewFromMorph(morph, hs, 0) - commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) - return cli -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go deleted file mode 100644 index 27f83aec7..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go +++ /dev/null @@ -1,154 +0,0 @@ -package zombie - -import ( - "context" - "fmt" - "math" - "os" - "path/filepath" - "strings" - "sync" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -type quarantine struct { - // mtx protects current field. - mtx sync.Mutex - current int - trees []*fstree.FSTree -} - -func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { - var paths []string - for _, sh := range engineInfo.Shards { - var storagePaths []string - for _, st := range sh.BlobStorInfo.SubStorages { - storagePaths = append(storagePaths, st.Path) - } - if len(storagePaths) == 0 { - continue - } - paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) - } - q, err := newQuarantine(paths) - commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) - return q -} - -func commonPath(paths []string) string { - if len(paths) == 0 { - return "" - } - if len(paths) == 1 { - return paths[0] - } - minLen := math.MaxInt - for _, p := range paths { - if len(p) < minLen { - minLen = len(p) - } - } - - var sb strings.Builder - for i := range minLen { - for _, path := range paths[1:] { - if paths[0][i] != path[i] { - return sb.String() - } - } - sb.WriteByte(paths[0][i]) - } - return sb.String() -} - -func newQuarantine(paths []string) (*quarantine, error) { - var q quarantine - for i := range paths { - f := fstree.New( - fstree.WithDepth(1), - fstree.WithDirNameLen(1), - fstree.WithPath(paths[i]), - fstree.WithPerm(os.ModePerm), - ) - if err := f.Open(mode.ComponentReadWrite); err != nil { - return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) - } - if err := f.Init(); err != nil { - return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) - } - q.trees = append(q.trees, f) - } - return &q, nil -} - -func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - for i := range q.trees { - res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) - if err != nil { - continue - } - return res.Object, nil - } - return nil, &apistatus.ObjectNotFound{} -} - -func (q *quarantine) Delete(ctx context.Context, a oid.Address) error { - for i := range q.trees { - _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) - if err != nil { - continue - } - return nil - } - return &apistatus.ObjectNotFound{} -} - -func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { - data, err := obj.Marshal() - if err != nil { - return err - } - - var prm common.PutPrm - prm.Address = objectcore.AddressOf(obj) - prm.Object = obj - prm.RawData = data - - q.mtx.Lock() - current := q.current - q.current = (q.current + 1) % len(q.trees) - q.mtx.Unlock() - - _, err = q.trees[current].Put(ctx, prm) - return err -} - -func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { - var prm common.IteratePrm - prm.Handler = func(elem common.IterationElement) error { - return f(elem.Address) - } - for i := range q.trees { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - _, err := q.trees[i].Iterate(ctx, prm) - if err != nil { - return err - } - } - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go deleted file mode 100644 index 0b8f2f172..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go +++ /dev/null @@ -1,55 +0,0 @@ -package zombie - -import ( - "errors" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func remove(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - - var containerID cid.ID - cidStr, _ := cmd.Flags().GetString(cidFlag) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - - var objectID *oid.ID - oidStr, _ := cmd.Flags().GetString(oidFlag) - if oidStr != "" { - objectID = &oid.ID{} - commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) - } - - if objectID != nil { - var addr oid.Address - addr.SetContainer(containerID) - addr.SetObject(*objectID) - removeObject(cmd, q, addr) - } else { - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { - if addr.Container() != containerID { - return nil - } - removeObject(cmd, q, addr) - return nil - })) - } -} - -func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { - err := q.Delete(cmd.Context(), addr) - if errors.Is(err, new(apistatus.ObjectNotFound)) { - return - } - commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go deleted file mode 100644 index f179c7c2d..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go +++ /dev/null @@ -1,69 +0,0 @@ -package zombie - -import ( - "crypto/sha256" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func restore(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - morphClient := createMorphClient(cmd, appCfg) - cnrCli := createContainerClient(cmd, morphClient) - - var containerID cid.ID - cidStr, _ := cmd.Flags().GetString(cidFlag) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - - var objectID *oid.ID - oidStr, _ := cmd.Flags().GetString(oidFlag) - if oidStr != "" { - objectID = &oid.ID{} - commonCmd.ExitOnErr(cmd, "decode object ID 
string: %w", objectID.DecodeString(oidStr)) - } - - if objectID != nil { - var addr oid.Address - addr.SetContainer(containerID) - addr.SetObject(*objectID) - restoreObject(cmd, storageEngine, q, addr, cnrCli) - } else { - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { - if addr.Container() != containerID { - return nil - } - restoreObject(cmd, storageEngine, q, addr, cnrCli) - return nil - })) - } -} - -func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) { - obj, err := q.Get(cmd.Context(), addr) - commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err) - rawCID := make([]byte, sha256.Size) - - cid := addr.Container() - cid.Encode(rawCID) - cnr, err := cnrCli.Get(cmd.Context(), rawCID) - commonCmd.ExitOnErr(cmd, "get container: %w", err) - - putPrm := engine.PutPrm{ - Object: obj, - IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value), - } - commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm)) - commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr)) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go deleted file mode 100644 index c8fd9e5e5..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go +++ /dev/null @@ -1,123 +0,0 @@ -package zombie - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - flagBatchSize = "batch-size" - flagBatchSizeUsage = "Objects iteration batch size" - cidFlag = "cid" - cidFlagUsage = "Container ID" - oidFlag = "oid" - oidFlagUsage = "Object ID" - walletFlag = "wallet" - walletFlagShorthand = "w" - walletFlagUsage = "Path to the wallet or binary key" - addressFlag = "address" - addressFlagUsage = "Address of wallet account" - moveFlag = "move" - moveFlagUsage = "Move objects from storage engine to quarantine" -) - -var ( - Cmd = &cobra.Command{ - Use: "zombie", - Short: "Zombie objects related commands", - } - scanCmd = &cobra.Command{ - Use: "scan", - Short: "Scan storage engine for zombie objects and move them to quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) - _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag)) - _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize)) - _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag)) - }, - Run: scan, - } - listCmd = &cobra.Command{ - Use: "list", - Short: "List zombie objects from quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) - }, - Run: list, - } - restoreCmd = &cobra.Command{ - Use: "restore", - Short: "Restore zombie objects from quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, 
cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) - _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) - }, - Run: restore, - } - removeCmd = &cobra.Command{ - Use: "remove", - Short: "Remove zombie objects from quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) - _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) - }, - Run: remove, - } -) - -func init() { - initScanCmd() - initListCmd() - initRestoreCmd() - initRemoveCmd() -} - -func initScanCmd() { - Cmd.AddCommand(scanCmd) - - scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) - scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage) - scanCmd.Flags().String(addressFlag, "", addressFlagUsage) - scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage) -} - -func initListCmd() { - Cmd.AddCommand(listCmd) - - listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - listCmd.Flags().String(cidFlag, "", cidFlagUsage) -} - -func initRestoreCmd() { - Cmd.AddCommand(restoreCmd) - - restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - restoreCmd.Flags().String(cidFlag, "", cidFlagUsage) - restoreCmd.Flags().String(oidFlag, "", oidFlagUsage) -} - -func initRemoveCmd() { - Cmd.AddCommand(removeCmd) - - removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - removeCmd.Flags().String(cidFlag, "", cidFlagUsage) - removeCmd.Flags().String(oidFlag, "", oidFlagUsage) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go deleted file mode 100644 index 268ec4911..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go +++ /dev/null @@ -1,281 +0,0 @@ -package zombie - -import ( - "context" - "crypto/ecdsa" - "crypto/sha256" - "errors" - "fmt" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" 
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" - clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" - "golang.org/x/sync/errgroup" -) - -func scan(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - batchSize, _ := cmd.Flags().GetUint32(flagBatchSize) - if batchSize == 0 { - commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value")) - } - move, _ := cmd.Flags().GetBool(moveFlag) - - storageEngine := newEngine(cmd, appCfg) - morphClient := createMorphClient(cmd, appCfg) - cnrCli := createContainerClient(cmd, morphClient) - nmCli := createNetmapClient(cmd, morphClient) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - pk := getPrivateKey(cmd, appCfg) - - epoch, err := nmCli.Epoch(cmd.Context()) - commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err) - - nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch) - commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err) - - cmd.Printf("Epoch: %d\n", nm.Epoch()) - cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes())) - - ps := &processStatus{ - statusCount: make(map[status]uint64), - } - - stopCh := make(chan struct{}) - start := time.Now() - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - tick := time.NewTicker(time.Second) - defer tick.Stop() - for { - select { - case <-cmd.Context().Done(): - return - case <-stopCh: - return - case <-tick.C: - fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start)) - } - } - }() - go func() { - defer wg.Done() - err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move) - close(stopCh) - }() - wg.Wait() - commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err) - - cmd.Println() - cmd.Println("Status description:") - cmd.Println("undefined -- nothing is clear") - cmd.Println("found -- object is found in cluster") - cmd.Println("quarantine -- object is not found in cluster") - cmd.Println() - for status, count := range ps.statusCount { - cmd.Printf("Status: %s, Count: %d\n", status, count) - } -} - -type status string - -const ( - statusUndefined status = "undefined" - statusFound status = "found" - statusQuarantine status = "quarantine" -) - -func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) { - rawCID := make([]byte, sha256.Size) - cid := obj.Address.Container() - cid.Encode(rawCID) - - cnr, err := cnrCli.Get(ctx, rawCID) - if err != nil { - var errContainerNotFound *apistatus.ContainerNotFound - if errors.As(err, &errContainerNotFound) { - // Policer will deal with this object. - return statusFound, nil - } - return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err) - } - nm, err := nmCli.NetMap(ctx) - if err != nil { - return statusUndefined, fmt.Errorf("read netmap from morph: %w", err) - } - - nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID) - if err != nil { - // Not enough nodes, check all netmap nodes. 
- nodes = append([][]netmap.NodeInfo{}, nm.Nodes()) - } - - objID := obj.Address.Object() - cnrID := obj.Address.Container() - local := true - raw := false - if obj.ECInfo != nil { - objID = obj.ECInfo.ParentID - local = false - raw = true - } - prm := clientSDK.PrmObjectHead{ - ObjectID: &objID, - ContainerID: &cnrID, - Local: local, - Raw: raw, - } - - var ni clientCore.NodeInfo - for i := range nodes { - for j := range nodes[i] { - if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil { - return statusUndefined, fmt.Errorf("parse node info: %w", err) - } - c, err := cc.Get(ni) - if err != nil { - continue - } - res, err := c.ObjectHead(ctx, prm) - if err != nil { - var errECInfo *objectSDK.ECInfoError - if raw && errors.As(err, &errECInfo) { - return statusFound, nil - } - continue - } - if err := apistatus.ErrFromStatus(res.Status()); err != nil { - continue - } - return statusFound, nil - } - } - - if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 { - return statusFound, nil - } - return statusQuarantine, nil -} - -func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus, - appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool, -) error { - cc := cache.NewSDKClientCache(cache.ClientCacheOpts{ - DialTimeout: apiclientconfig.DialTimeout(appCfg), - StreamTimeout: apiclientconfig.StreamTimeout(appCfg), - ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), - Key: pk, - AllowExternal: apiclientconfig.AllowExternal(appCfg), - }) - ctx := cmd.Context() - - var cursor *engine.Cursor - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var prm engine.ListWithCursorPrm - prm.WithCursor(cursor) - prm.WithCount(batchSize) - - res, err := storageEngine.ListWithCursor(ctx, prm) - if err != nil { - if errors.Is(err, engine.ErrEndOfListing) { - return nil - } - return fmt.Errorf("list with cursor: %w", err) - } - - cursor = res.Cursor() - addrList := res.AddressList() - eg, egCtx := errgroup.WithContext(ctx) - eg.SetLimit(int(batchSize)) - - for i := range addrList { - addr := addrList[i] - eg.Go(func() error { - result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr) - if err != nil { - return fmt.Errorf("check object %s status: %w", addr.Address, err) - } - ps.add(result) - - if !move && result == statusQuarantine { - cmd.Println(addr) - return nil - } - - if result == statusQuarantine { - return moveToQuarantine(egCtx, storageEngine, q, addr.Address) - } - return nil - }) - } - if err := eg.Wait(); err != nil { - return fmt.Errorf("process objects batch: %w", err) - } - } -} - -func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error { - var getPrm engine.GetPrm - getPrm.WithAddress(addr) - res, err := storageEngine.Get(ctx, getPrm) - if err != nil { - return fmt.Errorf("get object %s from storage engine: %w", addr, err) - } - - if err := q.Put(ctx, res.Object()); err != nil { - return fmt.Errorf("put object %s to quarantine: %w", addr, err) - } - - var delPrm engine.DeletePrm - delPrm.WithForceRemoval() - delPrm.WithAddress(addr) - - if err = storageEngine.Delete(ctx, delPrm); err != nil { - return fmt.Errorf("delete object %s from storage engine: %w", addr, err) - } - return nil -} - -type processStatus struct { - guard sync.RWMutex - statusCount 
map[status]uint64 - count uint64 -} - -func (s *processStatus) add(st status) { - s.guard.Lock() - defer s.guard.Unlock() - s.statusCount[st]++ - s.count++ -} - -func (s *processStatus) total() uint64 { - s.guard.RLock() - defer s.guard.RUnlock() - return s.count -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go deleted file mode 100644 index 5be34d502..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go +++ /dev/null @@ -1,201 +0,0 @@ -package zombie - -import ( - "context" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" - shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" - blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" - fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/panjf2000/ants/v2" - "github.com/spf13/cobra" - "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine { - ngOpts := storageEngineOptions(c) - shardOpts := shardOptions(cmd, c) - e := engine.New(ngOpts...) - for _, opts := range shardOpts { - _, err := e.AddShard(cmd.Context(), opts...) 
- commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) - } - commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context())) - commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context())) - return e -} - -func storageEngineOptions(c *config.Config) []engine.Option { - return []engine.Option{ - engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), - engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)), - } -} - -func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option { - var result [][]shard.Option - err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error { - result = append(result, getShardOpts(cmd, c, sh)) - return nil - }) - commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) - return result -} - -func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option { - wc, wcEnabled := getWriteCacheOpts(sh) - return []shard.Option{ - shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - shard.WithRefillMetabase(sh.RefillMetabase()), - shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()), - shard.WithMode(sh.Mode()), - shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...), - shard.WithMetaBaseOptions(getMetabaseOpts(sh)...), - shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...), - shard.WithWriteCache(wcEnabled), - shard.WithWriteCacheOptions(wc), - shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()), - shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()), - shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()), - shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()), - shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - commonCmd.ExitOnErr(cmd, "init GC pool: %w", err) - return pool - }), - shard.WithLimiter(qos.NewNoopLimiter()), - } -} - -func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) { - if wc := sh.WriteCache(); wc != nil && wc.Enabled() { - var result []writecache.Option - result = append(result, - writecache.WithPath(wc.Path()), - writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()), - writecache.WithMaxObjectSize(wc.MaxObjectSize()), - writecache.WithFlushWorkersCount(wc.WorkerCount()), - writecache.WithMaxCacheSize(wc.SizeLimit()), - writecache.WithMaxCacheCount(wc.CountLimit()), - writecache.WithNoSync(wc.NoSync()), - writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - writecache.WithQoSLimiter(qos.NewNoopLimiter()), - ) - return result, true - } - return nil, false -} - -func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option { - var piloramaOpts []pilorama.Option - if config.BoolSafe(c.Sub("tree"), "enabled") { - pr := sh.Pilorama() - piloramaOpts = append(piloramaOpts, - pilorama.WithPath(pr.Path()), - pilorama.WithPerm(pr.Perm()), - pilorama.WithNoSync(pr.NoSync()), - pilorama.WithMaxBatchSize(pr.MaxBatchSize()), - pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()), - ) - } - return piloramaOpts -} - -func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { - return []meta.Option{ - meta.WithPath(sh.Metabase().Path()), - meta.WithPermissions(sh.Metabase().BoltDB().Perm()), - meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()), - meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()), - meta.WithBoltDBOptions(&bbolt.Options{ - Timeout: 100 * 
time.Millisecond, - }), - meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - meta.WithEpochState(&epochState{}), - } -} - -func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { - result := []blobstor.Option{ - blobstor.WithCompression(sh.Compression()), - blobstor.WithStorages(getSubStorages(ctx, sh)), - blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - } - - return result -} - -func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage { - var ss []blobstor.SubStorage - for _, storage := range sh.BlobStor().Storages() { - switch storage.Type() { - case blobovniczatree.Type: - sub := blobovniczaconfig.From((*config.Config)(storage)) - blobTreeOpts := []blobovniczatree.Option{ - blobovniczatree.WithRootPath(storage.Path()), - blobovniczatree.WithPermissions(storage.Perm()), - blobovniczatree.WithBlobovniczaSize(sub.Size()), - blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()), - blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()), - blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()), - blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()), - blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), - blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), - blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), - blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())), - blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())), - blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), - } - - ss = append(ss, blobstor.SubStorage{ - Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) < sh.SmallSizeLimit() - }, - }) - case fstree.Type: - sub := fstreeconfig.From((*config.Config)(storage)) - fstreeOpts := []fstree.Option{ - fstree.WithPath(storage.Path()), - fstree.WithPerm(storage.Perm()), - fstree.WithDepth(sub.Depth()), - fstree.WithNoSync(sub.NoSync()), - fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - } - - ss = append(ss, blobstor.SubStorage{ - Storage: fstree.New(fstreeOpts...), - Policy: func(_ *objectSDK.Object, _ []byte) bool { - return true - }, - }) - default: - // should never happen, that has already - // been handled: when the config was read - } - } - return ss -} - -type epochState struct{} - -func (epochState) CurrentEpoch() uint64 { - return 0 -} diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go index c0c290c5e..beced0d7a 100644 --- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go +++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go @@ -28,7 +28,6 @@ const ( var ( errNoPathsFound = errors.New("no metabase paths found") errNoMorphEndpointsFound = errors.New("no morph endpoints found") - errUpgradeFailed = errors.New("upgrade failed") ) var UpgradeCmd = &cobra.Command{ @@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error { if err := eg.Wait(); err != nil { return err } - allSuccess := true for mb, ok := range result { if ok { cmd.Println(mb, ": success") } else { cmd.Println(mb, ": failed") - allSuccess = false } } - if allSuccess { - return nil - } - return errUpgradeFailed + return nil } func getMetabasePaths(appCfg *config.Config) ([]string, error) { diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 
3c332c3f0..914682647 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -3,8 +3,6 @@ package ape import ( "errors" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -78,8 +76,7 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) - ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName}) + ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) var ch util.Uint160 diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index 23dba14f4..be42f2aa5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) } - assert.NoError(w.Err) + if w.Err != nil { + panic(w.Err) + } alphaRes, err := c.InvokeScript(w.Bytes(), nil) if err != nil { @@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan for i := range accounts { emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash) } - assert.NoError(w.Err) + if w.Err != nil { + panic(w.Err) + } res, err := c.Run(w.Bytes()) if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) { diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index c17fb62ff..65ccc9f9f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig, netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig: nbuf := make([]byte, 8) - copy(nbuf, v) + copy(nbuf[:], v) n := binary.LittleEndian.Uint64(nbuf) - _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) + _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n))) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: if len(v) == 0 || len(v) > 1 { return helper.InvalidConfigValueErr(k) } - _, _ = tw.Write(fmt.Appendf(nil, 
"%s:\t%t (bool)\n", k, v[0] == 1)) + _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1))) default: - _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v))) + _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v)))) } } diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index 79685f111..e72dc15e9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/io" @@ -236,7 +235,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd putContainer(bw, ch, cnt) - assert.NoError(bw.Err) + if bw.Err != nil { + panic(bw.Err) + } if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go index 543b5fcb3..5adb480da 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/cli/cmdargs" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error { } } - assert.NoError(writer.Err, "can't create deployment script") + if writer.Err != nil { + panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) + } if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { return err @@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) } - assert.NoError(bw.Err, "can't create deployment script") - if bw.Len() != start { + if bw.Err != nil { + panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) + } else if bw.Len() != start { writer.WriteBytes(bw.Bytes()) emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index fde58fd2b..437e2480d 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -11,7 +11,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -220,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) { if info.version == "" { info.version = "unknown" } - _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n", - info.name, info.version, info.hash.StringLE())) + _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n", + info.name, info.version, info.hash.StringLE()))) } _ = tw.Flush() @@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } else { sub.Reset() emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - assert.NoError(sub.Err, "can't create version script") + if sub.Err != nil { + panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) + } script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.WriteBytes(script) + bw.BinWriter.WriteBytes(script) emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) emit.Opcodes(bw.BinWriter, opcode.PUSH0) } } emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - assert.NoError(bw.Err, "can't create version script") + if bw.Err != nil { + panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) + } res, err := c.InvokeScript(bw.Bytes(), nil) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 7f777db98..db98bb8ad 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,8 +1,6 @@ package frostfsid import ( - "encoding/hex" - "errors" "fmt" "math/big" "sort" @@ -35,16 +33,11 @@ const ( subjectNameFlag = "subject-name" subjectKeyFlag = "subject-key" subjectAddressFlag = "subject-address" - extendedFlag = "extended" + includeNamesFlag = "include-names" groupNameFlag = "group-name" groupIDFlag = "group-id" rootNamespacePlaceholder = "" - - keyFlag = "key" - keyDescFlag = "Key for storing a value in the subject's KV storage" - valueFlag = "value" - valueDescFlag = "Value to be stored in the subject's KV storage" ) var ( @@ -158,23 +151,6 @@ var ( }, Run: frostfsidListGroupSubjects, } - - frostfsidSetKVCmd = &cobra.Command{ - Use: "set-kv", - Short: "Store a key-value pair in the subject's KV storage", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidSetKV, - } - frostfsidDeleteKVCmd = &cobra.Command{ - Use: "delete-kv", - Short: "Delete a value from the subject's KV storage", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - }, - Run: frostfsidDeleteKV, - } ) func initFrostfsIDCreateNamespaceCmd() { @@ -210,7 +186,7 @@ func initFrostfsIDListSubjectsCmd() { Cmd.AddCommand(frostfsidListSubjectsCmd) frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects") - frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include 
subject info (require additional requests)") + frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether to include the subject name (requires additional requests)") } func initFrostfsIDCreateGroupCmd() { @@ -257,22 +233,7 @@ func initFrostfsIDListGroupSubjectsCmd() { frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name") frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") -} - -func initFrostfsIDSetKVCmd() { - Cmd.AddCommand(frostfsidSetKVCmd) - frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag) - frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag) -} - -func initFrostfsIDDeleteKVCmd() { - Cmd.AddCommand(frostfsidDeleteKVCmd) - frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") - frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag) + frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether to include the subject name (requires additional requests)") } func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { @@ -292,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) { reader := frostfsidrpclient.NewReader(inv, hash) sessionID, it, err := reader.ListNamespaces() commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, sessionID) + items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) namespaces, err := frostfsidclient.ParseNamespaces(items) @@ -337,32 +298,34 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) { } func frostfsidListSubjects(cmd *cobra.Command, _ []string) { - extended, _ := cmd.Flags().GetBool(extendedFlag) + includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) ns := getFrostfsIDNamespace(cmd) inv, _, hash := initInvoker(cmd) reader := frostfsidrpclient.NewReader(inv, hash) sessionID, it, err := reader.ListNamespaceSubjects(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID)) + subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID)) commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) for _, addr := range subAddresses { - if !extended { + if !includeNames { cmd.Println(address.Uint160ToString(addr)) continue } - items, err := reader.GetSubject(addr) + sessionID, it, err := reader.ListSubjects() commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) + items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) + subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - printSubjectInfo(cmd, addr, subj) - cmd.Println() + cmd.Printf("%s (%s)\n", 
address.Uint160ToString(addr), subj.Name) } } @@ -402,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroups(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, sessionID) + items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) groups, err := frostfsidclient.ParseGroups(items) commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err) @@ -440,49 +403,10 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) } -func frostfsidSetKV(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - key, _ := cmd.Flags().GetString(keyFlag) - value, _ := cmd.Flags().GetString(valueFlag) - - if key == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) - } - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value) - - ffsid.addCall(method, args) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "set KV: %w", err) -} - -func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { - subjectAddress := getFrostfsIDSubjectAddress(cmd) - key, _ := cmd.Flags().GetString(keyFlag) - - if key == "" { - commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) - } - - ffsid, err := newFrostfsIDClient(cmd) - commonCmd.ExitOnErr(cmd, "init contract client: %w", err) - - method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key) - - ffsid.addCall(method, args) - - err = ffsid.sendWait() - commonCmd.ExitOnErr(cmd, "delete KV: %w", err) -} - func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) - extended, _ := cmd.Flags().GetBool(extendedFlag) + includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) inv, cs, hash := initInvoker(cmd) _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) @@ -491,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID)) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) - items, err := readIterator(inv, &it, sessionID) + items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err) @@ -500,7 +424,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) }) for _, subjAddr := range subjects { - if !extended { + if !includeNames { cmd.Println(address.Uint160ToString(subjAddr)) continue } @@ -509,8 +433,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - printSubjectInfo(cmd, subjAddr, subj) - cmd.Println() + cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name) } } @@ -569,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) { return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) } -func readIterator(inv 
*invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) { +func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) { var shouldStop bool res := make([]stackitem.Item, 0) for !shouldStop { - items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize) + items, err := inv.TraverseIterator(sessionID, iter, batchSize) if err != nil { return nil, err } res = append(res, items...) - shouldStop = len(items) < iteratorBatchSize + shouldStop = len(items) < batchSize } return res, nil @@ -600,30 +523,3 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui return inv, cs, nmHash } - -func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) { - cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) - pk := "" - if subj.PrimaryKey != nil { - pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) - } - cmd.Printf("Primary key: %s\n", pk) - cmd.Printf("Name: %s\n", subj.Name) - cmd.Printf("Namespace: %s\n", subj.Namespace) - if len(subj.AdditionalKeys) > 0 { - cmd.Printf("Additional keys:\n") - for _, key := range subj.AdditionalKeys { - k := "" - if key != nil { - k = hex.EncodeToString(key.Bytes()) - } - cmd.Printf("- %s\n", k) - } - } - if len(subj.KV) > 0 { - cmd.Printf("KV:\n") - for k, v := range subj.KV { - cmd.Printf("- %s: %s\n", k, v) - } - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 8aad5c5c1..6ffcaa487 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -12,8 +12,6 @@ func init() { initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() - initFrostfsIDSetKVCmd() - initFrostfsIDDeleteKVCmd() initFrostfsIDAddSubjectKeyCmd() initFrostfsIDRemoveSubjectKeyCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index 6499ace5f..eb0444408 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -3,6 +3,9 @@ package helper import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -13,6 +16,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -24,86 +28,32 @@ type LocalActor struct { rpcInvoker invoker.RPCInvoke } -type AlphabetWallets struct { - Label string - Path string -} - -func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) { - w, err := GetAlphabetWallets(v, a.Path) - if err != nil { - return nil, err - } - - var accounts []*wallet.Account - for _, wall := range w { - acc, err := GetWalletAccount(wall, a.Label) - if err != nil { - return nil, err - } - accounts = append(accounts, acc) - } - return accounts, nil -} - -type RegularWallets struct{ Path string } - -func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) { - w, err := 
getRegularWallet(r.Path) - if err != nil { - return nil, err - } - - return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil -} - // NewLocalActor create LocalActor with accounts form provided wallets. // In case of empty wallets provided created actor with dummy account only for read operation. // // If wallets are provided, the contract client will use accounts with accName name from these wallets. // To determine which account name should be used in a contract client, refer to how the contract // verifies the transaction signature. -func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { +func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) { + walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) var act *actor.Actor var accounts []*wallet.Account - var signers []actor.SignerAccount - if alphabet != nil { - account, err := alphabet.GetAccount(viper.GetViper()) - if err != nil { - return nil, err - } + wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir) + commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) - accounts = append(accounts, account...) - signers = append(signers, actor.SignerAccount{ - Signer: transaction.Signer{ - Account: account[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: account[0], - }) + for _, w := range wallets { + acc, err := GetWalletAccount(w, accName) + commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err) + accounts = append(accounts, acc) } - - for _, w := range regularWallets { - if w == nil { - continue - } - account, err := w.GetAccount() - if err != nil { - return nil, err - } - - accounts = append(accounts, account...) 
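// The NewLocalActor hunk here drops the multi-wallet signer machinery in
// favour of a single named account. Reduced to its core, the construction is
// the one below; a sketch only, assuming a ready wallet.Account
// (newSingleSignerActor is an illustrative name, not from the patch):
//
//	package sketch
//
//	import (
//		"github.com/nspcc-dev/neo-go/pkg/core/transaction"
//		"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
//		"github.com/nspcc-dev/neo-go/pkg/wallet"
//	)
//
//	// newSingleSignerActor registers acc as the only signer, with Global
//	// scope, which is what the restored NewLocalActor does with the first
//	// alphabet account.
//	func newSingleSignerActor(c actor.RPCActor, acc *wallet.Account) (*actor.Actor, error) {
//		return actor.New(c, []actor.SignerAccount{{
//			Signer: transaction.Signer{
//				Account: acc.Contract.ScriptHash(),
//				Scopes:  transaction.Global,
//			},
//			Account: acc,
//		}})
//	}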
- signers = append(signers, actor.SignerAccount{ - Signer: transaction.Signer{ - Account: account[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: account[0], - }) - } - - act, err := actor.New(c, signers) + act, err = actor.New(c, []actor.SignerAccount{{ + Signer: transaction.Signer{ + Account: accounts[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: accounts[0], + }}) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 50b5c1ec7..961ceba53 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -6,7 +6,6 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -14,7 +13,9 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" + nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" @@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (* } func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - inv := invoker.New(c, nil) - reader := nns2.NewReader(inv, nnsHash) - return reader.IsAvailable(name) + switch c.(type) { + case *rpcclient.Client: + inv := invoker.New(c, nil) + reader := nns2.NewReader(inv, nnsHash) + return reader.IsAvailable(name) + default: + b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) + if err != nil { + return false, fmt.Errorf("`isAvailable`: invalid response: %w", err) + } + + return b, nil + } } func CheckNotaryEnabled(c Client) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index da5ffedae..8e5615baa 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -13,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -22,7 +21,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" 
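// The NNSIsAvailable hunk above is easier to read as one function: a real
// RPC client takes the typed NNS reader fast path, while any other Client
// implementation (e.g. the local offline-init client) falls back to a raw
// `isAvailable` invocation unwrapped as a bool. Reassembled from the hunk,
// with Client and InvokeFunction being this helper package's own:
//
//	func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
//		switch c.(type) {
//		case *rpcclient.Client:
//			inv := invoker.New(c, nil)
//			reader := nns2.NewReader(inv, nnsHash)
//			return reader.IsAvailable(name)
//		default:
//			b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
//			if err != nil {
//				return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
//			}
//			return b, nil
//		}
//	}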
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -30,6 +28,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -376,7 +375,9 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen } act, err = actor.New(c.Client, signers) } else { - assert.False(withConsensus, "BUG: should never happen") + if withConsensus { + panic("BUG: should never happen") + } act, err = c.CommitteeAct, nil } if err != nil { @@ -410,9 +411,11 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { version, err := c.Client.GetVersion() - // error appears only if client - // has not been initialized - assert.NoError(err) + if err != nil { + // error appears only if client + // has not been initialized + panic(err) + } network := version.Protocol.Network // Use parameter context to avoid dealing with signature order. @@ -444,12 +447,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin for i := range tx.Signers { if tx.Signers[i].Account == h { - assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") if i < len(tx.Scripts) { tx.Scripts[i] = *w - } - if i == len(tx.Scripts) { + } else if i == len(tx.Scripts) { tx.Scripts = append(tx.Scripts, *w) + } else { + panic("BUG: invalid signing order") } return nil } @@ -507,7 +510,9 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) emit.Opcodes(bw.BinWriter, opcode.ASSERT) - assert.NoError(bw.Err) + if bw.Err != nil { + panic(bw.Err) + } return bw.Bytes(), false, nil } @@ -519,8 +524,12 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U } func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) - return !avail, err + res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone) + if err != nil { + return false, err + } + + return res.State == vmstate.Halt.String(), nil } func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 46611c177..d0a05d5c7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" "github.com/nspcc-dev/neo-go/pkg/core" @@ -317,7 +316,9 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint func (l *LocalClient) putTransactions() error { // 1. Prepare new block. 
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - assert.NoError(err) + if err != nil { + panic(err) + } defer func() { l.transactions = l.transactions[:0] }() b := &block.Block{ @@ -358,7 +359,9 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s w := io.NewBufBinWriter() emit.Array(w.BinWriter, parameters...) emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) + if w.Err != nil { + panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) + } return c.InvokeScript(w.Bytes(), signers) } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go index 20abaff0a..fb8f03783 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go @@ -3,7 +3,6 @@ package helper import ( "errors" "fmt" - "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error { return err } for k, v := range m { - if slices.Contains(NetmapConfigKeys, k) { - md[k] = v + for _, key := range NetmapConfigKeys { + if k == key { + md[k] = v + break + } } } return nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index be6b2c6dd..c26aa447b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -14,7 +14,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - "github.com/nspcc-dev/neo-go/cli/input" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" @@ -23,27 +22,6 @@ import ( "github.com/spf13/viper" ) -func getRegularWallet(walletPath string) (*wallet.Wallet, error) { - w, err := wallet.NewWalletFromFile(walletPath) - if err != nil { - return nil, err - } - - password, err := input.ReadPassword("Enter password for wallet:") - if err != nil { - return nil, fmt.Errorf("can't fetch password: %w", err) - } - - for i := range w.Accounts { - if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { - err = fmt.Errorf("can't unlock wallet: %w", err) - break - } - } - - return w, err -} - func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { wallets, err := openAlphabetWallets(v, walletDir) if err != nil { @@ -73,7 +51,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er if errors.Is(err, os.ErrNotExist) { err = nil } else { - err = fmt.Errorf("can't open alphabet wallet: %w", err) + err = fmt.Errorf("can't open wallet: %w", err) } break } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go index 176356378..e127ca545 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go @@ -7,7 +7,6 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -112,7 +111,9 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - assert.NoError(w.Err, "can't wrap register script") + if w.Err != nil { + panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err)) + } } func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 7b7597d91..4c6607f9a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -1,18 +1,21 @@ package initialize import ( + "errors" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -27,8 +30,7 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - reader := neo.NewReader(c.ReadOnlyInvoker) - regPrice, err := reader.GetRegisterPrice() + regPrice, err := getCandidateRegisterPrice(c) if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } @@ -40,7 +42,9 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error { emit.Opcodes(w.BinWriter, opcode.ASSERT) } emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - assert.NoError(w.Err) + if w.Err != nil { + panic(fmt.Sprintf("BUG: %v", w.Err)) + } signers := []actor.SignerAccount{{ Signer: c.GetSigner(false, c.CommitteeAcc), @@ -112,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { neoHash := neo.Hash - ok, err := transferNEOFinished(c) + ok, err := transferNEOFinished(c, neoHash) if ok || err != nil { return err } @@ -135,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { return c.AwaitTx() } -func transferNEOFinished(c *helper.InitializeContext) (bool, error) { - r := neo.NewReader(c.ReadOnlyInvoker) +func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) { + r := 
nep17.NewReader(c.ReadOnlyInvoker, neoHash) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } + +var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") + +func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) { + switch c.Client.(type) { + case *rpcclient.Client: + inv := invoker.New(c.Client, nil) + reader := neo.NewReader(inv) + return reader.GetRegisterPrice() + default: + neoHash := neo.Hash + res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) + if err != nil { + return 0, err + } + if len(res.Stack) == 0 { + return 0, errGetPriceInvalid + } + bi, err := res.Stack[0].TryInteger() + if err != nil || !bi.IsInt64() { + return 0, errGetPriceInvalid + } + return bi.Int64(), nil + } +} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index bb684b3a9..7f1bfee2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -22,14 +22,15 @@ import ( ) const ( + gasInitialTotalSupply = 30000000 * native.GASFactor // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. initialAlphabetGASAmount = 10_000 * native.GASFactor // initialProxyGASAmount represents the amount of GAS given to a proxy contract. initialProxyGASAmount = 50_000 * native.GASFactor ) -func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { - return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 +func initialCommitteeGASAmount(c *helper.InitializeContext) int64 { + return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 } func transferFunds(c *helper.InitializeContext) error { @@ -41,11 +42,6 @@ func transferFunds(c *helper.InitializeContext) error { return err } - version, err := c.Client.GetVersion() - if err != nil { - return err - } - var transfers []transferTarget for _, acc := range c.Accounts { to := acc.Contract.ScriptHash() @@ -63,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error { transferTarget{ Token: gas.Hash, Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), + Amount: initialCommitteeGASAmount(c), }, transferTarget{ Token: neo.Hash, @@ -87,23 +83,16 @@ func transferFunds(c *helper.InitializeContext) error { // transferFundsFinished checks balances of accounts we transfer GAS to. // The stage is considered finished if the balance is greater than the half of what we need to transfer. 
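// A worked example for the restored initialCommitteeGASAmount above: the
// initial supply is pinned to 30,000,000 GAS again, each alphabet node gets
// 10,000 GAS, and the committee receives half of the remainder; with the
// usual 7 alphabet wallets that is (30,000,000 - 7*10,000) / 2 = 14,965,000
// GAS. The same arithmetic in code (amounts in 10^-8 GAS fractions, i.e.
// native.GASFactor units):
//
//	package sketch
//
//	const (
//		gasFactor                = 100_000_000 // native.GASFactor
//		gasInitialTotalSupply    = 30_000_000 * gasFactor
//		initialAlphabetGASAmount = 10_000 * gasFactor
//	)
//
//	// committeeGAS is half of what is left after every alphabet wallet
//	// received its share.
//	func committeeGAS(alphabetWallets int64) int64 {
//		return (gasInitialTotalSupply - initialAlphabetGASAmount*alphabetWallets) / 2
//	}
//
//	// committeeGAS(7) == 1_496_500_000_000_000 fractions, i.e. 14,965,000 GAS.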
func transferFundsFinished(c *helper.InitializeContext) (bool, error) { - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash()) - if err != nil { - return false, err - } + acc := c.Accounts[0] - version, err := c.Client.GetVersion() - if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { + r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) + res, err := r.BalanceOf(acc.Contract.ScriptHash()) + if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 { return false, err } res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) - if err != nil { - return false, err - } - - return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err + return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err } func transferGASToProxy(c *helper.InitializeContext) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index 14f6eb390..1668bb327 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -6,9 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" - "github.com/spf13/viper" ) func initRegisterCmd() { @@ -21,7 +19,6 @@ func initRegisterCmd() { registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") - registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) } @@ -51,7 +48,6 @@ func initDeleteCmd() { deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) } @@ -66,28 +62,3 @@ func deleteDomain(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) cmd.Println("Domain deleted successfully") } - -func initSetAdminCmd() { - Cmd.AddCommand(setAdminCmd) - setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) - setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) - setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage) - _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath) - - _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag) -} - -func setAdmin(cmd *cobra.Command, _ 
[]string) { - c, actor := nnsWriter(cmd) - - name, _ := cmd.Flags().GetString(nnsNameFlag) - w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath)) - commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err) - h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash()) - - _, err = actor.Wait(h, vub, err) - commonCmd.ExitOnErr(cmd, "Set admin error: %w", err) - cmd.Println("Set admin successfully") -} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index e49f62256..b13cbc8a1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -1,11 +1,7 @@ package nns import ( - "errors" - client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -20,32 +16,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) - walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath)) - adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath)) - - var ( - alphabet *helper.AlphabetWallets - regularWallets []*helper.RegularWallets - ) - - if alphabetWalletPath != "" { - alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName} - } - - if walletPath != "" { - regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) - } - - if adminWalletPath != "" { - regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath}) - } - - if alphabet == nil && regularWallets == nil { - commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) - } - - ac, err := helper.NewLocalActor(c, alphabet, regularWallets...) 
+ ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) r := management.NewReader(ac.Invoker) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 9cb47356f..09ed92ab3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -19,7 +19,6 @@ func initAddRecordCmd() { addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) - addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) @@ -41,7 +40,6 @@ func initDelRecordsCmd() { delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) - delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) @@ -54,7 +52,6 @@ func initDelRecordCmd() { delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) - delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index bb84933c6..9bdeaccd9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -39,7 +39,6 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: registerDomain, } @@ -49,7 +48,6 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: deleteDomain, } @@ -77,7 +75,6 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: addRecord, } @@ -95,7 +92,6 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, 
cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecords, } @@ -105,21 +101,9 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecord, } - setAdminCmd = &cobra.Command{ - Use: "set-admin", - Short: "Sets admin for domain", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) - _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) - _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath)) - }, - Run: setAdmin, - } ) func init() { @@ -132,5 +116,4 @@ func init() { initGetRecordsCmd() initDelRecordsCmd() initDelRecordCmd() - initSetAdminCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go index f2932e87c..686a244f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go @@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { buf := bytes.NewBuffer(nil) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee)) - _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte)) - _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice)) + _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee))) + _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte))) + _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice))) _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go index 24cda45a6..cb575b657 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go @@ -20,32 +20,23 @@ const ( accountAddressFlag = "account" ) -func parseAddresses(cmd *cobra.Command) []util.Uint160 { - var addrs []util.Uint160 - - accs, _ := cmd.Flags().GetStringArray(accountAddressFlag) - for _, acc := range accs { - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - - addrs = append(addrs, addr) - } - return addrs -} - func addProxyAccount(cmd *cobra.Command, _ []string) { - addrs := parseAddresses(cmd) - err := processAccount(cmd, addrs, "addAccount") + acc, _ := cmd.Flags().GetString(accountAddressFlag) + addr, err := address.StringToUint160(acc) + commonCmd.ExitOnErr(cmd, "invalid account: %w", err) + err = processAccount(cmd, addr, "addAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } func removeProxyAccount(cmd *cobra.Command, _ []string) { - addrs := parseAddresses(cmd) - err := processAccount(cmd, addrs, "removeAccount") + acc, _ := cmd.Flags().GetString(accountAddressFlag) + 
addr, err := address.StringToUint160(acc) + commonCmd.ExitOnErr(cmd, "invalid account: %w", err) + err = processAccount(cmd, addr, "removeAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } -func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error { +func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error { wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) if err != nil { return fmt.Errorf("can't initialize context: %w", err) @@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err } bw := io.NewBufBinWriter() - for _, addr := range addrs { - emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) - } + emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go index ad89af2b5..1854c8d2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go @@ -29,15 +29,13 @@ var ( func initProxyAddAccount() { AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") - _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) + AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initProxyRemoveAccount() { RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") - _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) + RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index cc8225c7a..defd898c8 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -5,9 +5,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" @@ -41,8 +41,8 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) + rootCmd.AddCommand(storagecfg.RootCmd) rootCmd.AddCommand(metabase.RootCmd) - rootCmd.AddCommand(maintenance.RootCmd) rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go 
b/cmd/frostfs-adm/internal/modules/storagecfg/config.go new file mode 100644 index 000000000..77183fb49 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/storagecfg/config.go @@ -0,0 +1,137 @@ +package storagecfg + +const configTemplate = `logger: + level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" + +node: + wallet: + path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented + address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented + password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented + addresses: # list of addresses announced by Storage node in the Network map + - {{ .AnnouncedAddress }} + attribute_0: UN-LOCODE:{{ .Attribute.Locode }} + relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map + +grpc: + num: 1 # total number of listener endpoints + 0: + endpoint: {{ .Endpoint }} # endpoint for gRPC server + tls:{{if .TLSCert}} + enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2) + certificate: {{ .TLSCert }} # path to TLS certificate + key: {{ .TLSKey }} # path to TLS key + {{- else }} + enabled: false # disable TLS for a gRPC connection + {{- end}} + +control: + authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service + {{- range .AuthorizedKeys }} + - {{.}}{{end}} + grpc: + endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service + +morph: + dial_timeout: 20s # timeout for side chain NEO RPC client connection + cache_ttl: 15s # use TTL cache for side chain GET operations + rpc_endpoint: # side chain N3 RPC endpoints + {{- range .MorphRPC }} + - address: wss://{{.}}/ws{{end}} +{{if not .Relay }} +storage: + shard_pool_size: 15 # size of per-shard worker pools used for PUT operations + + shard: + default: # section with the default shard parameters + metabase: + perm: 0644 # permissions for metabase files(directories: +x for current user and group) + + blobstor: + perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + depth: 2 # max depth of object tree storage in FS + small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes + compress: true # turn on/off Zstandard compression (level 3) of stored objects + compression_exclude_content_types: + - audio/* + - video/* + + blobovnicza: + size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes + depth: 1 # max depth of object tree storage in key-value DB + width: 4 # max width of object tree storage in key-value DB + opened_cache_capacity: 50 # maximum number of opened database files + opened_cache_ttl: 5m # ttl for opened database file + opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's + + gc: + remover_batch_size: 200 # number of objects to be removed by the garbage collector + remover_sleep_interval: 5m # frequency of the garbage collector invocation + 0: + mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only" + + metabase: + path: {{ .MetabasePath }} # path to the metabase + + blobstor: + path: {{ .BlobstorPath }} # path to the blobstor +{{end}}` + +const ( + neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221" + balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55" + 
neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1" + balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf" +) + +var n3config = map[string]struct { + MorphRPC []string + RPC []string + NeoFSContract string + BalanceContract string +}{ + "testnet": { + MorphRPC: []string{ + "rpc01.morph.testnet.fs.neo.org:51331", + "rpc02.morph.testnet.fs.neo.org:51331", + "rpc03.morph.testnet.fs.neo.org:51331", + "rpc04.morph.testnet.fs.neo.org:51331", + "rpc05.morph.testnet.fs.neo.org:51331", + "rpc06.morph.testnet.fs.neo.org:51331", + "rpc07.morph.testnet.fs.neo.org:51331", + }, + RPC: []string{ + "rpc01.testnet.n3.nspcc.ru:21331", + "rpc02.testnet.n3.nspcc.ru:21331", + "rpc03.testnet.n3.nspcc.ru:21331", + "rpc04.testnet.n3.nspcc.ru:21331", + "rpc05.testnet.n3.nspcc.ru:21331", + "rpc06.testnet.n3.nspcc.ru:21331", + "rpc07.testnet.n3.nspcc.ru:21331", + }, + NeoFSContract: neofsTestnetAddress, + BalanceContract: balanceTestnetAddress, + }, + "mainnet": { + MorphRPC: []string{ + "rpc1.morph.fs.neo.org:40341", + "rpc2.morph.fs.neo.org:40341", + "rpc3.morph.fs.neo.org:40341", + "rpc4.morph.fs.neo.org:40341", + "rpc5.morph.fs.neo.org:40341", + "rpc6.morph.fs.neo.org:40341", + "rpc7.morph.fs.neo.org:40341", + }, + RPC: []string{ + "rpc1.n3.nspcc.ru:10331", + "rpc2.n3.nspcc.ru:10331", + "rpc3.n3.nspcc.ru:10331", + "rpc4.n3.nspcc.ru:10331", + "rpc5.n3.nspcc.ru:10331", + "rpc6.n3.nspcc.ru:10331", + "rpc7.n3.nspcc.ru:10331", + }, + NeoFSContract: neofsMainnetAddress, + BalanceContract: balanceMainnetAddress, + }, +} diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go new file mode 100644 index 000000000..8e6a8354e --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go @@ -0,0 +1,433 @@ +package storagecfg + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "math/rand" + "net" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + "time" + + netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "github.com/chzyer/readline" + "github.com/nspcc-dev/neo-go/cli/flags" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" + "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + + "github.com/spf13/cobra" +) + +const ( + walletFlag = "wallet" + accountFlag = "account" +) + +const ( + defaultControlEndpoint = "localhost:8090" + defaultDataEndpoint = "localhost" +) + +// RootCmd is a root command of config section. 
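// The storage-config command below collects answers interactively, fills a
// config value and writes the rendered configTemplate to disk via
// applyTemplate. applyTemplate's body lies outside this excerpt; a rendering
// step of roughly this shape would produce the YAML (a sketch under that
// assumption, with simplified error handling):
//
//	package sketch
//
//	import (
//		"bytes"
//		"text/template"
//	)
//
//	// renderConfig parses the template once, then executes it against the
//	// filled-in config value.
//	func renderConfig(configTemplate string, c any) ([]byte, error) {
//		t, err := template.New("config").Parse(configTemplate)
//		if err != nil {
//			return nil, err
//		}
//		var buf bytes.Buffer
//		if err := t.Execute(&buf, c); err != nil {
//			return nil, err
//		}
//		return buf.Bytes(), nil
//	}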
+var RootCmd = &cobra.Command{ + Use: "storage-config [-w wallet] [-a account] []", + Short: "Section for storage node configuration commands", + Run: storageConfig, +} + +func init() { + fs := RootCmd.Flags() + + fs.StringP(walletFlag, "w", "", "Path to wallet") + fs.StringP(accountFlag, "a", "", "Wallet account") +} + +type config struct { + AnnouncedAddress string + AuthorizedKeys []string + ControlEndpoint string + Endpoint string + TLSCert string + TLSKey string + MorphRPC []string + Attribute struct { + Locode string + } + Wallet struct { + Path string + Account string + Password string + } + Relay bool + BlobstorPath string + MetabasePath string +} + +func storageConfig(cmd *cobra.Command, args []string) { + outPath := getOutputPath(args) + + historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history") + readline.SetHistoryPath(historyPath) + + var c config + + c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag) + if c.Wallet.Path == "" { + c.Wallet.Path = getPath("Path to the storage node wallet: ") + } + + w, err := wallet.NewWalletFromFile(c.Wallet.Path) + fatalOnErr(err) + + fillWalletAccount(cmd, &c, w) + + accH, err := flags.ParseAddress(c.Wallet.Account) + fatalOnErr(err) + + acc := w.GetAccount(accH) + if acc == nil { + fatalOnErr(errors.New("can't find account in wallet")) + } + + c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account)) + fatalOnErr(err) + + err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) + fatalOnErr(err) + + c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes())) + + network := readNetwork(cmd) + + c.MorphRPC = n3config[network].MorphRPC + + depositGas(cmd, acc, network) + + c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ") + + endpoint := getDefaultEndpoint(cmd, &c) + c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint)) + if c.Endpoint == "" { + c.Endpoint = endpoint + } + + c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint)) + if c.ControlEndpoint == "" { + c.ControlEndpoint = defaultControlEndpoint + } + + c.TLSCert = getPath("TLS Certificate (optional): ") + if c.TLSCert != "" { + c.TLSKey = getPath("TLS Key: ") + } + + c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ") + if !c.Relay { + p := getPath("Path to the storage directory (all available storage will be used): ") + c.BlobstorPath = filepath.Join(p, "blob") + c.MetabasePath = filepath.Join(p, "meta") + } + + out := applyTemplate(c) + fatalOnErr(os.WriteFile(outPath, out, 0o644)) + + cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`") +} + +func getDefaultEndpoint(cmd *cobra.Command, c *config) string { + var addr, port string + for { + c.AnnouncedAddress = getString("Publicly announced address: ") + validator := netutil.Address{} + err := validator.FromString(c.AnnouncedAddress) + if err != nil { + cmd.Println("Incorrect address format. 
See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.") + continue + } + uriAddr, err := url.Parse(validator.URIAddr()) + if err != nil { + panic(fmt.Errorf("unexpected error: %w", err)) + } + addr = uriAddr.Hostname() + port = uriAddr.Port() + ip, err := net.ResolveIPAddr("ip", addr) + if err != nil { + cmd.Printf("Can't resolve IP address %s: %v\n", addr, err) + continue + } + + if !ip.IP.IsGlobalUnicast() { + cmd.Println("IP must be global unicast.") + continue + } + cmd.Printf("Resolved IP address: %s\n", ip.String()) + + _, err = strconv.ParseUint(port, 10, 16) + if err != nil { + cmd.Println("Port must be an integer.") + continue + } + + break + } + return net.JoinHostPort(defaultDataEndpoint, port) +} + +func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) { + c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag) + if c.Wallet.Account == "" { + addr := address.Uint160ToString(w.GetChangeAddress()) + c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr)) + if c.Wallet.Account == "" { + c.Wallet.Account = addr + } + } +} + +func readNetwork(cmd *cobra.Command) string { + var network string + for { + network = getString("Choose network [mainnet]/testnet: ") + switch network { + case "": + network = "mainnet" + case "testnet", "mainnet": + default: + cmd.Println(`Network must be either "mainnet" or "testnet"`) + continue + } + break + } + return network +} + +func getOutputPath(args []string) string { + if len(args) != 0 { + return args[0] + } + outPath := getPath("File to write config at [./config.yml]: ") + if outPath == "" { + outPath = "./config.yml" + } + return outPath +} + +func getWalletAccount(w *wallet.Wallet, prompt string) string { + addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts)) + for i := range w.Accounts { + addrs[i] = readline.PcItem(w.Accounts[i].Address) + } + + readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...)) + defer readline.SetAutoComplete(nil) + + s, err := readline.Line(prompt) + fatalOnErr(err) + return strings.TrimSpace(s) // autocompleter can return a string with a trailing space +} + +func getString(prompt string) string { + s, err := readline.Line(prompt) + fatalOnErr(err) + if s != "" { + _ = readline.AddHistory(s) + } + return s +} + +type filenameCompleter struct{} + +func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { + prefix := string(line[:pos]) + dir := filepath.Dir(prefix) + de, err := os.ReadDir(dir) + if err != nil { + return nil, 0 + } + + for i := range de { + name := filepath.Join(dir, de[i].Name()) + if strings.HasPrefix(name, prefix) { + tail := []rune(strings.TrimPrefix(name, prefix)) + if de[i].IsDir() { + tail = append(tail, filepath.Separator) + } + newLine = append(newLine, tail) + } + } + if pos != 0 { + return newLine, pos - len([]rune(dir)) + } + return newLine, 0 +} + +func getPath(prompt string) string { + readline.SetAutoComplete(filenameCompleter{}) + defer readline.SetAutoComplete(nil) + + p, err := readline.Line(prompt) + fatalOnErr(err) + + if p == "" { + return p + } + + _ = readline.AddHistory(p) + + abs, err := filepath.Abs(p) + if err != nil { + fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err)) + } + + return abs +} + +func getConfirmation(def bool, prompt string) bool { + for { + s, err := readline.Line(prompt) + fatalOnErr(err) + + switch strings.ToLower(s) { + case "y", "yes": + return true + case "n", "no": + return false + default: + if 
len(s) == 0 { + return def + } + } + } +} + +func applyTemplate(c config) []byte { + tmpl, err := template.New("config").Parse(configTemplate) + fatalOnErr(err) + + b := bytes.NewBuffer(nil) + fatalOnErr(tmpl.Execute(b, c)) + + return b.Bytes() +} + +func fatalOnErr(err error) { + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) { + sideClient := initClient(n3config[network].MorphRPC) + balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract) + + sideActor, err := actor.NewSimple(sideClient, acc) + if err != nil { + fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err)) + } + + sideGas := nep17.NewReader(sideActor, balanceHash) + accSH := acc.Contract.ScriptHash() + + balance, err := sideGas.BalanceOf(accSH) + if err != nil { + fatalOnErr(fmt.Errorf("side chain balance: %w", err)) + } + + ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ", + fixedn.ToString(balance, 12))) + if !ok { + return + } + + amountStr := getString("Enter amount in GAS: ") + amount, err := fixedn.FromString(amountStr, 8) + if err != nil { + fatalOnErr(fmt.Errorf("invalid amount: %w", err)) + } + + mainClient := initClient(n3config[network].RPC) + neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract) + + mainActor, err := actor.NewSimple(mainClient, acc) + if err != nil { + fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err)) + } + + mainGas := nep17.New(mainActor, gas.Hash) + + txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil) + if err != nil { + fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err)) + } + + cmd.Print("Waiting for transactions to persist.") + tick := time.NewTicker(time.Second / 2) + defer tick.Stop() + + timer := time.NewTimer(time.Second * 20) + defer timer.Stop() + + at := trigger.Application + +loop: + for { + select { + case <-tick.C: + _, err := mainClient.GetApplicationLog(txHash, &at) + if err == nil { + cmd.Print("\n") + break loop + } + cmd.Print(".") + case <-timer.C: + cmd.Printf("\nTimeout while waiting for transaction to persist.\n") + if getConfirmation(false, "Continue configuration? 
yes/[no]: ") { + return + } + os.Exit(1) + } + } +} + +func initClient(rpc []string) *rpcclient.Client { + var c *rpcclient.Client + var err error + + shuffled := make([]string, len(rpc)) + copy(shuffled, rpc) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + + for _, endpoint := range shuffled { + c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{ + DialTimeout: time.Second * 2, + RequestTimeout: time.Second * 5, + }) + if err != nil { + continue + } + if err = c.Init(); err != nil { + continue + } + return c + } + + fatalOnErr(fmt.Errorf("can't create N3 client: %w", err)) + panic("unreachable") +} diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 299d0a830..ceae36ae7 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -9,6 +9,7 @@ import ( "io" "os" "slices" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -76,7 +77,9 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain // SortedIDList returns sorted list of identifiers of user's containers. func (x ListContainersRes) SortedIDList() []cid.ID { list := x.cliRes.Containers() - slices.SortFunc(list, cid.ID.Cmp) + slices.SortFunc(list, func(lhs, rhs cid.ID) int { + return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString()) + }) return list } @@ -684,7 +687,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes return nil, fmt.Errorf("read object list: %w", err) } - slices.SortFunc(list, oid.ID.Cmp) + slices.SortFunc(list, func(a, b oid.ID) int { + return strings.Compare(a.EncodeToString(), b.EncodeToString()) + }) return &SearchObjectsRes{ ids: list, @@ -858,8 +863,6 @@ type PatchObjectPrm struct { ReplaceAttribute bool - NewSplitHeader *objectSDK.SplitHeader - PayloadPatches []PayloadPatch } @@ -890,11 +893,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { return nil, fmt.Errorf("init payload reading: %w", err) } - if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ - NewSplitHeader: prm.NewSplitHeader, - NewAttributes: prm.NewAttributes, - ReplaceAttributes: prm.ReplaceAttribute, - }) { + if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) { for _, pp := range prm.PayloadPatches { payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) if err != nil { diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 1eadfa2e1..2d9c45cbd 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go index 6ed21e107..88321176f 100644 --- a/cmd/frostfs-cli/internal/commonflags/api.go +++ b/cmd/frostfs-cli/internal/commonflags/api.go @@ -9,7 +9,7 @@ const ( TTL = "ttl" TTLShorthand = "" TTLDefault = 2 - TTLUsage = "The 
maximum number of intermediate nodes in the request route" + TTLUsage = "TTL value in request meta header" XHeadersKey = "xhdr" XHeadersShorthand = "x" diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go index fad1f6183..cd46d63eb 100644 --- a/cmd/frostfs-cli/internal/commonflags/flags.go +++ b/cmd/frostfs-cli/internal/commonflags/flags.go @@ -28,7 +28,7 @@ const ( RPC = "rpc-endpoint" RPCShorthand = "r" RPCDefault = "" - RPCUsage = "Remote node address (':' or 'grpcs://:')" + RPCUsage = "Remote node address (as 'multiaddr' or ':')" Timeout = "timeout" TimeoutShorthand = "t" diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go index 0927788ba..a86506c37 100644 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ b/cmd/frostfs-cli/modules/bearer/create.go @@ -44,7 +44,6 @@ is set to current epoch + n. _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) - _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) }, } @@ -82,7 +81,7 @@ func createToken(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) if iatRelative || expRelative || nvbRelative { - endpoint := viper.GetString(commonflags.RPC) + endpoint, _ := cmd.Flags().GetString(commonflags.RPC) if len(endpoint) == 0 { commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) } diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index 9632061f1..13fe07995 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) { outputPath, _ := cmd.Flags().GetString(outputFlag) if outputPath != "" { - err := os.WriteFile(outputPath, overrideMarshalled, 0o644) + err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644) commonCmd.ExitOnErr(cmd, "dump error: %w", err) } else { fmt.Print("\n") diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go index fac6eb2cd..8c4ab14f8 100644 --- a/cmd/frostfs-cli/modules/container/get.go +++ b/cmd/frostfs-cli/modules/container/get.go @@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod cmd.Println("created:", container.CreatedAt(cnr)) cmd.Println("attributes:") - for key, val := range cnr.Attributes() { + cnr.IterateAttributes(func(key, val string) { cmd.Printf("\t%s=%s\n", key, val) - } + }) cmd.Println("placement policy:") commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index e4a023d91..bbb8da840 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -102,9 +102,9 @@ func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, i cmd.Println(id.String()) if flagVarListPrintAttr { - for key, val := range cnr.Attributes() { + cnr.IterateUserAttributes(func(key, val string) { cmd.Printf(" %s: %s\n", key, val) - } + }) } } diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index cf4862b4a..40bd4110b 
100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -5,9 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "maps" "os" - "slices" "strings" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -21,16 +19,15 @@ import ( ) type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo - console *readline.Instance + cmd *cobra.Command + nodes map[string]netmap.NodeInfo } -func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { +func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) { return &policyPlaygroundREPL{ cmd: cmd, nodes: map[string]netmap.NodeInfo{}, - } + }, nil } func (repl *policyPlaygroundREPL) handleLs(args []string) error { @@ -40,10 +37,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { i := 1 for id, node := range repl.nodes { var attrs []string - for k, v := range node.Attributes() { + node.IterateAttributes(func(k, v string) { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - } - fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) + }) + fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } return nil @@ -150,29 +147,12 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { for _, node := range ns { ids = append(ids, hex.EncodeToString(node.PublicKey())) } - fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) + fmt.Printf("\t%2d: %v\n", i+1, ids) } return nil } -func (repl *policyPlaygroundREPL) handleHelp(args []string) error { - if len(args) != 0 { - if _, ok := commands[args[0]]; !ok { - return fmt.Errorf("unknown command: %q", args[0]) - } - fmt.Fprintln(repl.console, commands[args[0]].usage) - return nil - } - - commandList := slices.Collect(maps.Keys(commands)) - slices.Sort(commandList) - for _, command := range commandList { - fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion) - } - return nil -} - func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { var nm netmap.NetMap var nodes []netmap.NodeInfo @@ -183,104 +163,15 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { return nm } -type commandDescription struct { - descriprion string - usage string -} - -var commands = map[string]commandDescription{ - "list": { - descriprion: "Display all nodes in the netmap", - usage: `Display all nodes in the netmap -Example of usage: - list - 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} - 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} -`, - }, - - "ls": { - descriprion: "Display all nodes in the netmap", - usage: `Display all nodes in the netmap -Example of usage: - ls - 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} - 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} -`, - }, - - "add": { - descriprion: "Add a new node: add attr=value", - usage: `Add a new node -Example of usage: - add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, - }, - - "load": { - descriprion: "Load netmap from file: load ", - usage: `Load netmap from file -Example of usage: - load "netmap.json" -File format (netmap.json): -{ - 
"03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": { - "continent": "Europe", - "country": "Poland" - }, - "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": { - "continent": "Antarctica", - "country": "Heard Island" - } -}`, - }, - - "remove": { - descriprion: "Remove a node: remove ", - usage: `Remove a node -Example of usage: - remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, - }, - - "rm": { - descriprion: "Remove a node: rm ", - usage: `Remove a node -Example of usage: - rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, - }, - - "eval": { - descriprion: "Evaluate a policy: eval ", - usage: `Evaluate a policy -Example of usage: - eval REP 2`, - }, - - "help": { - descriprion: "Show available commands", - }, -} - -func (repl *policyPlaygroundREPL) handleCommand(args []string) error { - if len(args) == 0 { - return nil - } - - switch args[0] { - case "list", "ls": - return repl.handleLs(args[1:]) - case "add": - return repl.handleAdd(args[1:]) - case "load": - return repl.handleLoad(args[1:]) - case "remove", "rm": - return repl.handleRemove(args[1:]) - case "eval": - return repl.handleEval(args[1:]) - case "help": - return repl.handleHelp(args[1:]) - } - return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0]) -} +var policyPlaygroundCompleter = readline.NewPrefixCompleter( + readline.PcItem("list"), + readline.PcItem("ls"), + readline.PcItem("add"), + readline.PcItem("load"), + readline.PcItem("remove"), + readline.PcItem("rm"), + readline.PcItem("eval"), +) func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { @@ -299,32 +190,24 @@ func (repl *policyPlaygroundREPL) run() error { } } - if len(viper.GetString(netmapConfigPath)) > 0 { - err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) - commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) + cmdHandlers := map[string]func([]string) error{ + "list": repl.handleLs, + "ls": repl.handleLs, + "add": repl.handleAdd, + "load": repl.handleLoad, + "remove": repl.handleRemove, + "rm": repl.handleRemove, + "eval": repl.handleEval, } - var cfgCompleter []readline.PrefixCompleterInterface - var helpSubItems []readline.PrefixCompleterInterface - - for name := range commands { - if name != "help" { - cfgCompleter = append(cfgCompleter, readline.PcItem(name)) - helpSubItems = append(helpSubItems, readline.PcItem(name)) - } - } - - cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) - completer := readline.NewPrefixCompleter(cfgCompleter...) rl, err := readline.NewEx(&readline.Config{ Prompt: "> ", InterruptPrompt: "^C", - AutoComplete: completer, + AutoComplete: policyPlaygroundCompleter, }) if err != nil { return fmt.Errorf("error initializing readline: %w", err) } - repl.console = rl defer rl.Close() var exit bool @@ -342,8 +225,17 @@ func (repl *policyPlaygroundREPL) run() error { } exit = false - if err := repl.handleCommand(strings.Fields(line)); err != nil { - fmt.Fprintf(repl.console, "error: %v\n", err) + parts := strings.Fields(line) + if len(parts) == 0 { + continue + } + cmd := parts[0] + if handler, exists := cmdHandlers[cmd]; exists { + if err := handler(parts[1:]); err != nil { + fmt.Printf("error: %v\n", err) + } + } else { + fmt.Printf("error: unknown command %q\n", cmd) } } } @@ -354,19 +246,12 @@ var policyPlaygroundCmd = &cobra.Command{ Long: `A REPL for testing placement policies. 
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`, Run: func(cmd *cobra.Command, _ []string) { - repl := newPolicyPlaygroundREPL(cmd) + repl, err := newPolicyPlaygroundREPL(cmd) + commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err) commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run()) }, } -const ( - netmapConfigPath = "netmap-config" - netmapConfigUsage = "Path to the netmap configuration file" -) - func initContainerPolicyPlaygroundCmd() { commonflags.Init(policyPlaygroundCmd) - policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) - - _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) } diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index b8d7eb046..8032bf09a 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftMinutes := int(leftSeconds / 60) - fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) + sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes)) } func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR hour := int(duration.Seconds() / 3600) minute := int(duration.Seconds()/60) % 60 second := int(duration.Seconds()) % 60 - fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) + sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second)) } } func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if resp.GetBody().GetStartedAt() != nil { startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) + sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339))) } } func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if len(resp.GetBody().GetErrorMessage()) > 0 { - fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) + sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage())) } } @@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes default: status = "undefined" } - fmt.Fprintf(sb, " Status: %s.", status) + sb.WriteString(fmt.Sprintf(" Status: %s.", status)) } func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR } func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", + sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetTotalObjects(), resp.GetBody().GetFailedObjects(), resp.GetBody().GetSkippedObjects(), resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetTotalTrees(), - 
resp.GetBody().GetFailedTrees()) + resp.GetBody().GetFailedTrees())) } func initControlEvacuationShardCmd() { diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 3142d02e7..8bd2dc9cd 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) { tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) _, _ = tw.Write([]byte("#\tName\tType\n")) for i, t := range targets { - _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) + _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))) } _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go deleted file mode 100644 index 4cb4be539..000000000 --- a/cmd/frostfs-cli/modules/control/locate.go +++ /dev/null @@ -1,117 +0,0 @@ -package control - -import ( - "bytes" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/mr-tron/base58" - "github.com/spf13/cobra" -) - -const ( - FullInfoFlag = "full" - FullInfoFlagUsage = "Print full ShardInfo." -) - -var locateObjectCmd = &cobra.Command{ - Use: "locate-object", - Short: "List shards storing the object", - Long: "List shards storing the object", - Run: locateObject, -} - -func initControlLocateObjectCmd() { - initControlFlags(locateObjectCmd) - - flags := locateObjectCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) - _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag) - - flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. 
Requires --full flag.") - flags.Bool(FullInfoFlag, false, FullInfoFlagUsage) -} - -func locateObject(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - _ = object.ReadObjectAddress(cmd, &cnr, &obj) - - pk := key.Get(cmd) - - body := new(control.ListShardsForObjectRequest_Body) - body.SetContainerId(cnr.EncodeToString()) - body.SetObjectId(obj.EncodeToString()) - req := new(control.ListShardsForObjectRequest) - req.SetBody(body) - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var err error - var resp *control.ListShardsForObjectResponse - err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.ListShardsForObject(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - shardIDs := resp.GetBody().GetShard_ID() - - isFull, _ := cmd.Flags().GetBool(FullInfoFlag) - if !isFull { - for _, id := range shardIDs { - cmd.Println(base58.Encode(id)) - } - return - } - - // get full shard info - listShardsReq := new(control.ListShardsRequest) - listShardsReq.SetBody(new(control.ListShardsRequest_Body)) - signRequest(cmd, pk, listShardsReq) - var listShardsResp *control.ListShardsResponse - err = cli.ExecRaw(func(client *rawclient.Client) error { - listShardsResp, err = control.ListShards(client, listShardsReq) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody()) - - shards := listShardsResp.GetBody().GetShards() - sortShardsByID(shards) - shards = filterShards(shards, shardIDs) - - isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) - if isJSON { - prettyPrintShardsJSON(cmd, shards) - } else { - prettyPrintShards(cmd, shards) - } -} - -func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo { - var res []control.ShardInfo - for _, id := range ids { - for _, inf := range info { - if bytes.Equal(inf.Shard_ID, id) { - res = append(res, inf) - } - } - } - return res -} diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go index 3abfe80cb..b20d3618e 100644 --- a/cmd/frostfs-cli/modules/control/root.go +++ b/cmd/frostfs-cli/modules/control/root.go @@ -39,7 +39,6 @@ func init() { listRulesCmd, getRuleCmd, listTargetsCmd, - locateObjectCmd, ) initControlHealthCheckCmd() @@ -53,5 +52,4 @@ func init() { initControlListRulesCmd() initControGetRuleCmd() initControlListTargetsCmd() - initControlLocateObjectCmd() } diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go index 26a1ba883..87c4f3b3d 100644 --- a/cmd/frostfs-cli/modules/control/set_netmap_status.go +++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go @@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client. 
var resp *control.GetNetmapStatusResponse var err error err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.GetNetmapStatus(cmd.Context(), client, req) + resp, err = control.GetNetmapStatus(client, req) return err }) commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err) diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go index d0c9a641b..80e4a0c87 100644 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ b/cmd/frostfs-cli/modules/control/writecache.go @@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{ var sealWritecacheShardCmd = &cobra.Command{ Use: "seal", Short: "Flush objects from write-cache and move write-cache to degraded read only mode.", - Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", + Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.", Run: sealWritecache, } diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index 5da66dcd9..ae4bb329a 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -62,11 +62,11 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("state:", stateWord) - for s := range i.NetworkEndpoints() { + netmap.IterateNetworkEndpoints(i, func(s string) { cmd.Println("address:", s) - } + }) - for key, value := range i.Attributes() { + i.IterateAttributes(func(key, value string) { cmd.Printf("attribute: %s=%s\n", key, value) - } + }) } diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go index 08a9ac4c8..e4e9cddb8 100644 --- a/cmd/frostfs-cli/modules/object/delete.go +++ b/cmd/frostfs-cli/modules/object/delete.go @@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) } - objAddr = ReadObjectAddress(cmd, &cnr, &obj) + objAddr = readObjectAddress(cmd, &cnr, &obj) } pk := key.GetOrGenerate(cmd) diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go index 7312f5384..f1edccba2 100644 --- a/cmd/frostfs-cli/modules/object/get.go +++ b/cmd/frostfs-cli/modules/object/get.go @@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := ReadObjectAddress(cmd, &cnr, &obj) + objAddr := readObjectAddress(cmd, &cnr, &obj) filename := cmd.Flag(fileFlag).Value.String() out, closer := createOutWriter(cmd, filename) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index 25df375d4..461c35f30 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -41,7 +41,7 @@ func initObjectHashCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...") + flags.String("range", "", "Range to take hash from in the form offset1:length1,...") _ = objectHashCmd.MarkFlagRequired("range") flags.String("type", hashSha256, "Hash type. 
Either 'sha256' or 'tz'") @@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := ReadObjectAddress(cmd, &cnr, &obj) + objAddr := readObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index 97e996cad..70c273443 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := ReadObjectAddress(cmd, &cnr, &obj) + objAddr := readObjectAddress(cmd, &cnr, &obj) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index d67db9f0d..53dd01868 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -18,7 +18,6 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" - "github.com/spf13/viper" ) // object lock command. @@ -79,7 +78,7 @@ var objectLockCmd = &cobra.Command{ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - endpoint := viper.GetString(commonflags.RPC) + endpoint, _ := cmd.Flags().GetString(commonflags.RPC) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 476238651..31682c0e1 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "slices" "sync" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -49,12 +48,6 @@ type ecHeader struct { parent oid.ID } -type objectCounter struct { - sync.Mutex - total uint32 - isECcounted bool -} - type objectPlacement struct { requiredNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo @@ -63,7 +56,6 @@ type objectPlacement struct { type objectNodesResult struct { errors []error placements map[oid.ID]objectPlacement - total uint32 } type ObjNodesDataObject struct { @@ -109,23 +101,23 @@ func initObjectNodesCmd() { func objectNodes(cmd *cobra.Command, _ []string) { var cnrID cid.ID var objID oid.ID - ReadObjectAddress(cmd, &cnrID, &objID) + readObjectAddress(cmd, &cnrID, &objID) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) + objects := getPhyObjects(cmd, cnrID, objID, cli, pk) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - getActualPlacement(cmd, netmap, pk, objects, count, result) + getActualPlacement(cmd, netmap, pk, objects, result) printPlacement(cmd, objID, objects, result) } -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { +func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -153,7 +145,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID 
cid.ID, objID oid.ID, cli *client.C parent: res.Header().ECHeader().Parent(), } } - return []phyObject{obj}, 1 + return []phyObject{obj} } var errSplitInfo *objectSDK.SplitInfoError @@ -163,34 +155,29 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 + return getECObjectChunks(cmd, cnrID, objID, ecInfoError) } commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil, 0 + return nil } -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { - members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total +func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject { + members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) + return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead) } -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { - var total int +func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID { splitInfo := errSplitInfo.SplitInfo() if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { - if total = len(members); total > 0 { - total-- // linking object is not data object - } - return members, total + return members } if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return members, len(members) + return members } - members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) - return members, len(members) + return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) } func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { @@ -333,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem } placementBuilder := placement.NewNetworkMapBuilder(netmap) for _, object := range objects { - placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err) for repIdx, rep := range placement { numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() @@ -371,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem placementObjectID = object.ecHeader.parent } placementBuilder := placement.NewNetworkMapBuilder(netmap) - placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err) for _, vector := 
range placement { @@ -396,11 +383,8 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem } } -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { +func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) { resultMtx := &sync.Mutex{} - counter := &objectCounter{ - total: uint32(count), - } candidates := getNodesToCheckObjectExistance(cmd, netmap, result) @@ -417,7 +401,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. for _, object := range objects { eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) + stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk) resultMtx.Lock() defer resultMtx.Unlock() if err == nil && stored { @@ -436,7 +420,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. } commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) - result.total = counter.total } func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { @@ -461,11 +444,17 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N var cli *client.Client var addresses []string if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { - addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) + candidate.IterateNetworkEndpoints(func(s string) bool { + addresses = append(addresses, s) + return false + }) addresses = append(addresses, candidate.ExternalAddresses()...) } else { addresses = append(addresses, candidate.ExternalAddresses()...) 
-		addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints())
+		candidate.IterateNetworkEndpoints(func(s string) bool {
+			addresses = append(addresses, s)
+			return false
+		})
 	}
 
 	var lastErr error
@@ -489,7 +478,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
 	return cli, nil
 }
 
-func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
+func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
 	var addrObj oid.Address
 	addrObj.SetContainer(cnrID)
 	addrObj.SetObject(objID)
@@ -504,14 +493,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
 
 	res, err := internalclient.HeadObject(ctx, prmHead)
 	if err == nil && res != nil {
-		if res.Header().ECHeader() != nil {
-			counter.Lock()
-			defer counter.Unlock()
-			if !counter.isECcounted {
-				counter.total *= res.Header().ECHeader().Total()
-			}
-			counter.isECcounted = true
-		}
 		return true, nil
 	}
 	var notFound *apistatus.ObjectNotFound
@@ -531,8 +512,7 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
 }
 
 func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
-	fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
-	fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
+	fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
 
 	for _, object := range objects {
 		fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go
index ebbde76a2..8f03885ab 100644
--- a/cmd/frostfs-cli/modules/object/patch.go
+++ b/cmd/frostfs-cli/modules/object/patch.go
@@ -2,7 +2,6 @@ package object
 
 import (
 	"fmt"
-	"os"
 	"strconv"
 	"strings"
 
@@ -10,7 +9,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -22,7 +20,6 @@ const (
 	replaceAttrsFlagName = "replace-attrs"
 	rangeFlagName        = "range"
 	payloadFlagName      = "payload"
-	splitHeaderFlagName  = "split-header"
 )
 
 var objectPatchCmd = &cobra.Command{
@@ -49,18 +46,17 @@ func initObjectPatchCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
 
-	flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
+	flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
 	flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
 	flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
 	flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
-	flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
 }
 
 func patch(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID
 
-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)
 
 	ranges, err := getRangeSlice(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)
@@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
 	prm.NewAttributes = newAttrs
 	prm.ReplaceAttribute = replaceAttrs
 
-	prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
-
 	for i := range ranges {
 		prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
 			Range:       ranges[i],
@@ -105,9 +99,11 @@
 }
 
 func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-	rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
-	if err != nil {
-		return nil, err
+	var rawAttrs []string
+
+	raw := cmd.Flag(newAttrsFlagName).Value.String()
+	if len(raw) != 0 {
+		rawAttrs = strings.Split(raw, ",")
 	}
 
 	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
@@ -153,22 +149,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
 	v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
 	return v
 }
-
-func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
-	path, _ := cmd.Flags().GetString(splitHeaderFlagName)
-	if path == "" {
-		return nil
-	}
-
-	data, err := os.ReadFile(path)
-	commonCmd.ExitOnErr(cmd, "read file error: %w", err)
-
-	splitHdrV2 := new(objectV2.SplitHeader)
-	err = splitHdrV2.Unmarshal(data)
-	if err != nil {
-		err = splitHdrV2.UnmarshalJSON(data)
-		commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
-	}
-
-	return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
-}
diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go
index 9e8a7cc6f..affe9bbba 100644
--- a/cmd/frostfs-cli/modules/object/put.go
+++ b/cmd/frostfs-cli/modules/object/put.go
@@ -50,7 +50,7 @@ func initObjectPutCmd() {
 
 	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
 
-	flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
+	flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
 	flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
 	flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
 	flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
 }
 
 func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-	rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
-	if err != nil {
-		return nil, err
+	var rawAttrs []string
+
+	raw := cmd.Flag("attributes").Value.String()
+	if len(raw) != 0 {
+		rawAttrs = strings.Split(raw, ",")
 	}
 
 	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go
index 6ec508ae2..ad4bc3d59 100644
--- a/cmd/frostfs-cli/modules/object/range.go
+++ b/cmd/frostfs-cli/modules/object/range.go
@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
 
-	flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
+	flags.String("range", "", "Range to take data from in the form offset:length")
 	flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
 	flags.Bool(rawFlag, false, rawFlagDesc)
 }
@@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
 	var cnr cid.ID
 	var obj oid.ID
 
-	objAddr := ReadObjectAddress(cmd, &cnr, &obj)
+	objAddr := readObjectAddress(cmd, &cnr, &obj)
 
 	ranges, err := getRangeList(cmd)
 	commonCmd.ExitOnErr(cmd, "", err)
@@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool {
 	if ok {
 		toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
 		toProto, _ := cmd.Flags().GetBool("proto")
-		if !toJSON && !toProto {
+		if !(toJSON || toProto) {
 			cmd.PrintErrln("Object is erasure-encoded, ec information received.")
 		}
 		printECInfo(cmd, errECInfo.ECInfo())
@@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
 }
 
 func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
-	vs, err := cmd.Flags().GetStringSlice("range")
-	if len(vs) == 0 || err != nil {
-		return nil, err
+	v := cmd.Flag("range").Value.String()
+	if len(v) == 0 {
+		return nil, nil
 	}
+	vs := strings.Split(v, ",")
 	rs := make([]objectSDK.Range, len(vs))
 	for i := range vs {
 		before, after, found := strings.Cut(vs[i], rangeSep)
diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go
index 8e4e8b287..b090c9f8c 100644
--- a/cmd/frostfs-cli/modules/object/util.go
+++ b/cmd/frostfs-cli/modules/object/util.go
@@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
 	return xs
 }
 
-func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
+func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
 	readCID(cmd, cnr)
 	readOID(cmd, obj)
 
@@ -262,8 +262,13 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client
 		if _, ok := dst.(*internal.DeleteObjectPrm); ok {
 			common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
 
-			objs = collectObjectRelatives(cmd, cli, cnr, *obj)
-			objs = append(objs, *obj)
+			rels := collectObjectRelatives(cmd, cli, cnr, *obj)
+
+			if len(rels) == 0 {
+				objs = []oid.ID{*obj}
+			} else {
+				objs = append(rels, *obj)
+			}
 		}
 	}
 
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index d71a94b98..a70624ac8 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -2,19 +2,18 @@ package tree
 
 import (
 	"context"
-	"crypto/tls"
 	"fmt"
+	"strings"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
 	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials/insecure"
 )
 
@@ -33,29 +32,23 @@ func _client() (tree.TreeServiceClient, error) {
 		return nil, err
 	}
 
-	host, isTLS, err := client.ParseURI(netAddr.URIAddr())
-	if err != nil {
-		return nil, err
-	}
-
-	creds := insecure.NewCredentials()
-	if isTLS {
-		creds = credentials.NewTLS(&tls.Config{})
-	}
-
 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
-			tracing.NewUnaryClientInterceptor(),
+			metrics.NewUnaryClientInterceptor(),
+			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
+			metrics.NewStreamClientInterceptor(),
 			tracing.NewStreamClientInterceptor(),
 		),
 		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
-		grpc.WithDisableServiceConfig(),
-		grpc.WithTransportCredentials(creds),
 	}
 
-	cc, err := grpc.NewClient(host, opts...)
+	if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
+		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	}
+
+	cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
 	return tree.NewTreeServiceClient(cc), err
 }
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 13a747ba6..09af08525 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -4,14 +4,11 @@ import (
 	"context"
 	"os"
 	"os/signal"
-	"strconv"
 	"syscall"
 
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"github.com/spf13/cast"
 	"github.com/spf13/viper"
 	"go.uber.org/zap"
 )
@@ -41,33 +38,13 @@ func reloadConfig() error {
 	}
 	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
 	audit.Store(cfg.GetBool("audit.enabled"))
-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(cfg.GetString("logger.level"))
 	if err != nil {
 		return err
 	}
-	err = logPrm.SetTags(loggerTags())
-	if err != nil {
-		return err
-	}
-	logger.UpdateLevelForTags(logPrm)
+	logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
 
-	return nil
-}
-
-func loggerTags() [][]string {
-	var res [][]string
-	for i := 0; ; i++ {
-		var item []string
-		index := strconv.FormatInt(int64(i), 10)
-		names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
-		if names == "" {
-			break
-		}
-		item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
-		res = append(res, item)
-	}
-	return res
+	return logPrm.Reload()
 }
 
 func watchForSignal(ctx context.Context, cancel func()) {
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 799feb784..ade64ba84 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -31,6 +31,7 @@ const (
 var (
 	wg         = new(sync.WaitGroup)
 	intErr     = make(chan error) // internal inner ring errors
+	logPrm     = new(logger.Prm)
 	innerRing  *innerring.Server
 	pprofCmp   *pprofComponent
 	metricsCmp *httpComponent
@@ -69,7 +70,6 @@ func main() {
 
 	metrics := irMetrics.NewInnerRingMetrics()
 
-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(
 		cfg.GetString("logger.level"),
 	)
@@ -80,14 +80,10 @@
 	exitErr(err)
 	logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
 	logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
-	err = logPrm.SetTags(loggerTags())
-	exitErr(err)
 
 	log, err = logger.NewLogger(logPrm)
 	exitErr(err)
 
-	logger.UpdateLevelForTags(logPrm)
-
 	ctx, cancel := context.WithCancel(context.Background())
 
 	pprofCmp = newPprofComponent()
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
index 7b0e25f3d..5a41f945c 100644
--- a/cmd/frostfs-lens/internal/meta/tui.go
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -2,17 +2,13 @@ package meta
 
 import (
 	"context"
-	"encoding/binary"
-	"errors"
 	"fmt"
 
 	common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
-	schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
 	"github.com/rivo/tview"
 	"github.com/spf13/cobra"
-	"go.etcd.io/bbolt"
 )
 
 var tuiCMD = &cobra.Command{
@@ -31,11 +27,6 @@ Available search filters:
 
 var initialPrompt string
 
-var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
-	2: schema.MetabaseParserV2,
-	3: schema.MetabaseParserV3,
-}
-
 func init() {
 	common.AddComponentPathFlag(tuiCMD, &vPath)
 
@@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
 	}
 	defer db.Close()
 
-	schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
-	if !hasVersion {
-		return errors.New("couldn't detect schema version")
-	}
-
-	metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
-	if !ok {
-		return fmt.Errorf("unknown schema version %d", schemaVersion)
-	}
-
 	// Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, metabaseParser, nil) + ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil) _ = ui.AddFilter("cid", tui.CIDParser, "CID") _ = ui.AddFilter("oid", tui.OIDParser, "OID") @@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } - -var ( - shardInfoBucket = []byte{5} - versionRecord = []byte("version") -) - -func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) { - err := db.View(func(tx *bbolt.Tx) error { - bkt := tx.Bucket(shardInfoBucket) - if bkt == nil { - return nil - } - rec := bkt.Get(versionRecord) - if rec == nil { - return nil - } - - version = binary.LittleEndian.Uint64(rec) - ok = true - - return nil - }) - if err != nil { - common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err)) - } - - return -} diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go index 077a68785..9bad19032 100644 --- a/cmd/frostfs-lens/internal/schema/common/schema.go +++ b/cmd/frostfs-lens/internal/schema/common/schema.go @@ -3,8 +3,6 @@ package common import ( "errors" "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) type FilterResult byte @@ -73,7 +71,11 @@ func (fp FallbackParser) ToParser() Parser { func (p Parser) ToFallbackParser() FallbackParser { return func(key, value []byte) (SchemaEntry, Parser) { entry, next, err := p(key, value) - assert.NoError(err, "couldn't use that parser as a fallback parser") + if err != nil { + panic(fmt.Errorf( + "couldn't use that parser as a fallback parser, it returned an error: %w", err, + )) + } return entry, next } } diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go index 4e6bbf08a..24cc0e52d 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go @@ -80,15 +80,10 @@ var ( }, ) - UserAttributeParserV2 = NewUserAttributeKeyBucketParser( + UserAttributeParser = NewUserAttributeKeyBucketParser( NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), ) - UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys( - NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), - []string{"FilePath", "S3-Access-Box-CRDT-Name"}, - ) - PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ cidResolver: StrictResolver, oidResolver: StrictResolver, @@ -113,14 +108,4 @@ var ( cidResolver: StrictResolver, oidResolver: LenientResolver, }) - - ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{ - cidResolver: LenientResolver, - oidResolver: LenientResolver, - }) - - ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{ - cidResolver: StrictResolver, - oidResolver: LenientResolver, - }) ) diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go index 42a24c594..2fb122940 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go @@ -22,31 +22,27 @@ const ( Split ContainerCounters ECInfo - 
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
index 42a24c594..2fb122940 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
@@ -22,31 +22,27 @@ const (
 	Split
 	ContainerCounters
 	ECInfo
-	ExpirationEpochToObject
-	ObjectToExpirationEpoch
 )
 
 var x = map[Prefix]string{
-	Graveyard:               "Graveyard",
-	Garbage:                 "Garbage",
-	ToMoveIt:                "To Move It",
-	ContainerVolume:         "Container Volume",
-	Locked:                  "Locked",
-	ShardInfo:               "Shard Info",
-	Primary:                 "Primary",
-	Lockers:                 "Lockers",
-	Tombstone:               "Tombstone",
-	Small:                   "Small",
-	Root:                    "Root",
-	Owner:                   "Owner",
-	UserAttribute:           "User Attribute",
-	PayloadHash:             "Payload Hash",
-	Parent:                  "Parent",
-	Split:                   "Split",
-	ContainerCounters:       "Container Counters",
-	ECInfo:                  "EC Info",
-	ExpirationEpochToObject: "Exp. Epoch to Object",
-	ObjectToExpirationEpoch: "Object to Exp. Epoch",
+	Graveyard:         "Graveyard",
+	Garbage:           "Garbage",
+	ToMoveIt:          "To Move It",
+	ContainerVolume:   "Container Volume",
+	Locked:            "Locked",
+	ShardInfo:         "Shard Info",
+	Primary:           "Primary",
+	Lockers:           "Lockers",
+	Tombstone:         "Tombstone",
+	Small:             "Small",
+	Root:              "Root",
+	Owner:             "Owner",
+	UserAttribute:     "User Attribute",
+	PayloadHash:       "Payload Hash",
+	Parent:            "Parent",
+	Split:             "Split",
+	ContainerCounters: "Container Counters",
+	ECInfo:            "EC Info",
 }
 
 func (p Prefix) String() string {
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
index 62d126f88..db90bddbd 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
@@ -9,7 +9,7 @@ import (
 
 func (b *PrefixBucket) String() string {
 	return common.FormatSimple(
-		fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+		fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 	)
 }
 
@@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
 	return fmt.Sprintf(
 		"%s CID %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(b.id.String(), tcell.ColorAqua),
 	)
@@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
 func (b *UserAttributeKeyBucket) String() string {
 	return fmt.Sprintf("%s CID %s ATTR-KEY %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(
 			fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
index 7355c3d9e..82b47dd85 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
@@ -2,7 +2,6 @@ package buckets
 
 import (
 	"errors"
-	"slices"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -58,11 +57,10 @@ var (
 )
 
 var (
-	ErrNotBucket              = errors.New("not a bucket")
-	ErrInvalidKeyLength       = errors.New("invalid key length")
-	ErrInvalidValueLength     = errors.New("invalid value length")
-	ErrInvalidPrefix          = errors.New("invalid prefix")
-	ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
+	ErrNotBucket          = errors.New("not a bucket")
+	ErrInvalidKeyLength   = errors.New("invalid key length")
+	ErrInvalidValueLength = errors.New("invalid value length")
+	ErrInvalidPrefix      = errors.New("invalid prefix")
 )
 
 func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
 }
 
 func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
-	return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
-}
-
-func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
 	return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
 		if value != nil {
 			return nil, nil, ErrNotBucket
@@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
 			return nil, nil, err
 		}
 		b.key = string(key[33:])
-
-		if len(keys) != 0 && !slices.Contains(keys, b.key) {
-			return nil, nil, ErrUnexpectedAttributeKey
-		}
-
 		return &b, next, nil
 	}
 }
diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go
index 4cc9e8765..ea095e207 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/parser.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go
@@ -5,30 +5,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
 )
 
-var MetabaseParserV3 = common.WithFallback(
-	common.Any(
-		buckets.GraveyardParser,
-		buckets.GarbageParser,
-		buckets.ContainerVolumeParser,
-		buckets.LockedParser,
-		buckets.ShardInfoParser,
-		buckets.PrimaryParser,
-		buckets.LockersParser,
-		buckets.TombstoneParser,
-		buckets.SmallParser,
-		buckets.RootParser,
-		buckets.UserAttributeParserV3,
-		buckets.ParentParser,
-		buckets.SplitParser,
-		buckets.ContainerCountersParser,
-		buckets.ECInfoParser,
-		buckets.ExpirationEpochToObjectParser,
-		buckets.ObjectToExpirationEpochParser,
-	),
-	common.RawParser.ToFallbackParser(),
-)
-
-var MetabaseParserV2 = common.WithFallback(
+var MetabaseParser = common.WithFallback(
 	common.Any(
 		buckets.GraveyardParser,
 		buckets.GarbageParser,
@@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
 		buckets.SmallParser,
 		buckets.RootParser,
 		buckets.OwnerParser,
-		buckets.UserAttributeParserV2,
+		buckets.UserAttributeParser,
 		buckets.PayloadHashParser,
 		buckets.ParentParser,
 		buckets.SplitParser,
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
index 477c4fc9d..2dda15b4f 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
@@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
 func (r *ECInfoRecord) DetailedString() string {
 	return spew.Sdump(*r)
 }
-
-func (r *ExpirationEpochToObjectRecord) DetailedString() string {
-	return spew.Sdump(*r)
-}
-
-func (r *ObjectToExpirationEpochRecord) DetailedString() string {
-	return spew.Sdump(*r)
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
index e038911d7..880a7a8ff 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
@@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
 		return common.No
 	}
 }
-
-func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
-	switch typ {
-	case "cid":
-		id := val.(cid.ID)
-		return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
-	case "oid":
-		id := val.(oid.ID)
-		return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
-	default:
-		return common.No
-	}
-}
-
-func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
-	switch typ {
-	case "oid":
-		id := val.(oid.ID)
-		return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
-	default:
-		return common.No
-	}
-}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
index 5d846cb75..1b070e2a0 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
@@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
 	}
 	return &r, nil, nil
 }
-
-func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
-	if len(key) != 72 {
-		return nil, nil, ErrInvalidKeyLength
-	}
-
-	var (
-		r   ExpirationEpochToObjectRecord
-		err error
-	)
-
-	r.epoch = binary.BigEndian.Uint64(key[:8])
-	if err = r.cnt.Decode(key[8:40]); err != nil {
-		return nil, nil, err
-	}
-	if err = r.obj.Decode(key[40:]); err != nil {
-		return nil, nil, err
-	}
-
-	return &r, nil, nil
-}
-
-func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
-	if len(key) != 32 {
-		return nil, nil, ErrInvalidKeyLength
-	}
-	if len(value) != 8 {
-		return nil, nil, ErrInvalidValueLength
-	}
-
-	var (
-		r   ObjectToExpirationEpochRecord
-		err error
-	)
-
-	if err = r.obj.Decode(key); err != nil {
-		return nil, nil, err
-	}
-	r.epoch = binary.LittleEndian.Uint64(value)
-
-	return &r, nil, nil
-}
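Aside: the removed record parsers decode fixed-layout bbolt keys and reject any key with an unexpected length before slicing. A standalone sketch of that defensive decoding, assuming the same 72-byte layout as above (8-byte big-endian epoch plus two 32-byte identifiers); the types are illustrative:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// expKey mirrors the removed record's shape: epoch + container ID + object ID.
type expKey struct {
	epoch uint64
	cnr   [32]byte
	obj   [32]byte
}

var errInvalidKeyLength = errors.New("invalid key length")

func parseExpKey(key []byte) (expKey, error) {
	var r expKey
	if len(key) != 72 { // 8 + 32 + 32; check first, slice after
		return r, errInvalidKeyLength
	}
	r.epoch = binary.BigEndian.Uint64(key[:8])
	copy(r.cnr[:], key[8:40])
	copy(r.obj[:], key[40:72])
	return r, nil
}

func main() {
	key := make([]byte, 72)
	binary.BigEndian.PutUint64(key[:8], 42)
	r, err := parseExpKey(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.epoch) // 42
}
```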
epoch %s", - common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), - tview.Borders.Vertical, - common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua), - ) -} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go index 0809cad1a..34c1c29fd 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go @@ -79,15 +79,4 @@ type ( id oid.ID ids []oid.ID } - - ExpirationEpochToObjectRecord struct { - epoch uint64 - cnt cid.ID - obj oid.ID - } - - ObjectToExpirationEpochRecord struct { - obj oid.ID - epoch uint64 - } ) diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go index 3bfe2608b..7d70b27b2 100644 --- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go +++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go @@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, r.addr.SetContainer(cnr) r.addr.SetObject(obj) - r.data = value + r.data = value[:] return &r, nil, nil } diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go index 2d3b20792..3f5088e7a 100644 --- a/cmd/frostfs-lens/internal/tui/buckets.go +++ b/cmd/frostfs-lens/internal/tui/buckets.go @@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren( path := parentBucket.Path parser := parentBucket.NextParser - buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) + buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) + if err != nil { + return err + } for item := range buffer { if item.err != nil { @@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren( } bucket := item.val - var err error bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) if err != nil { return err @@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter( defer cancel() // Check the current bucket's nested buckets if exist - bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) + bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) + if err != nil { + return false, err + } for item := range bucketsBuffer { if item.err != nil { @@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter( } b := item.val - var err error b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) if err != nil { return false, err @@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter( } // Check the current bucket's nested records if exist - recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) + recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) + if err != nil { + return false, err + } for item := range recordsBuffer { if item.err != nil { @@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter( } r := item.val - var err error r.Entry, _, err = bucket.NextParser(r.Key, r.Value) if err != nil { return false, err diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go index 94fa87f98..d0cf611d4 100644 --- a/cmd/frostfs-lens/internal/tui/db.go +++ b/cmd/frostfs-lens/internal/tui/db.go @@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) { func load[T any]( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, filter func(key, value []byte) bool, transform func(key, value []byte) T, -) <-chan 
diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go
index 94fa87f98..d0cf611d4 100644
--- a/cmd/frostfs-lens/internal/tui/db.go
+++ b/cmd/frostfs-lens/internal/tui/db.go
@@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
 
 func load[T any](
 	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
 	filter func(key, value []byte) bool, transform func(key, value []byte) T,
-) <-chan Item[T] {
+) (<-chan Item[T], error) {
 	buffer := make(chan Item[T], bufferSize)
 
 	go func() {
@@ -77,13 +77,13 @@ func load[T any](
 		}
 	}()
 
-	return buffer
+	return buffer, nil
 }
 
 func LoadBuckets(
 	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Bucket] {
-	buffer := load(
+) (<-chan Item[*Bucket], error) {
+	buffer, err := load(
 		ctx, db, path, bufferSize,
 		func(_, value []byte) bool {
 			return value == nil
@@ -98,14 +98,17 @@ func LoadBuckets(
 			}
 		},
 	)
+	if err != nil {
+		return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+	}
 
-	return buffer
+	return buffer, nil
 }
 
 func LoadRecords(
 	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Record] {
-	buffer := load(
+) (<-chan Item[*Record], error) {
+	buffer, err := load(
 		ctx, db, path, bufferSize,
 		func(_, value []byte) bool {
 			return value != nil
@@ -121,8 +124,11 @@ func LoadRecords(
 		},
 	)
+	if err != nil {
+		return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+	}
 
-	return buffer
+	return buffer, nil
 }
 
 // HasBuckets checks if a bucket has nested buckets. It relies on assumption
@@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	buffer := load(
+	buffer, err := load(
 		ctx, db, path, 1,
 		nil,
 		func(_, value []byte) []byte { return value },
 	)
+	if err != nil {
+		return false, err
+	}
 
 	x, ok := <-buffer
 	if !ok {
 		return false, nil
 	}
 	if x.err != nil {
-		return false, x.err
+		return false, err
 	}
 	if x.val != nil {
-		return false, nil
+		return false, err
 	}
 	return true, nil
 }
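Aside: load[T] streams key/value pairs through a buffered channel so the TUI can render while iteration continues. A generic sketch of that producer pattern with context cancellation — bbolt specifics are stubbed out behind an iterate callback, so this is the shape, not the repo's implementation:

```go
package tui

import "context"

// Item carries either a value or the error that ended iteration.
type Item[T any] struct {
	val T
	err error
}

// load fans key/value pairs from iterate into a buffered channel.
// The goroutine exits when iteration ends or ctx is cancelled, so the
// caller can abandon a long listing simply by cancelling the context.
func load[T any](
	ctx context.Context, bufferSize int,
	iterate func(yield func(key, value []byte) bool) error,
	filter func(key, value []byte) bool,
	transform func(key, value []byte) T,
) <-chan Item[T] {
	buffer := make(chan Item[T], bufferSize)

	go func() {
		defer close(buffer)

		err := iterate(func(key, value []byte) bool {
			if filter != nil && !filter(key, value) {
				return true // skip this pair, keep iterating
			}
			select {
			case buffer <- Item[T]{val: transform(key, value)}:
				return true
			case <-ctx.Done():
				return false // consumer went away, stop iterating
			}
		})
		if err != nil {
			// report the iteration error as the final item
			select {
			case buffer <- Item[T]{err: err}:
			case <-ctx.Done():
			}
		}
	}()

	return buffer
}
```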
diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go
index 471514e5d..4fdf97119 100644
--- a/cmd/frostfs-lens/internal/tui/input.go
+++ b/cmd/frostfs-lens/internal/tui/input.go
@@ -1,8 +1,6 @@
 package tui
 
 import (
-	"slices"
-
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
@@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
 
 	// Used history data for search prompt, so just make that data recent.
 	if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
-		f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
+		f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
 		f.history = append(f.history, s)
 	}
 
@@ -53,17 +51,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
 			f.historyPointer++
 			// Stop iterating over history.
 			if f.historyPointer == len(f.history) {
-				f.SetText(f.currentContent)
+				f.InputField.SetText(f.currentContent)
 				return
 			}
-			f.SetText(f.history[f.historyPointer])
+			f.InputField.SetText(f.history[f.historyPointer])
 		case tcell.KeyUp:
 			if len(f.history) == 0 {
 				return
 			}
 			// Start iterating over history.
 			if f.historyPointer == len(f.history) {
-				f.currentContent = f.GetText()
+				f.currentContent = f.InputField.GetText()
 			}
 			// End of history.
 			if f.historyPointer == 0 {
@@ -71,7 +69,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
 			}
 			// Iterate to least recent prompts.
 			f.historyPointer--
-			f.SetText(f.history[f.historyPointer])
+			f.InputField.SetText(f.history[f.historyPointer])
 		default:
 			f.InputField.InputHandler()(event, func(tview.Primitive) {})
 		}
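Aside: InputFieldWithHistory implements shell-style prompt history, where a pointer equal to len(history) means "editing a fresh line" and the in-progress draft is saved on the first Up press. A minimal model of those Up/Down semantics, independent of tview (names illustrative):

```go
package main

import "fmt"

// history keeps entries oldest-first; pointer == len(entries) means
// "editing a fresh line", exactly like shell history navigation.
type history struct {
	entries []string
	pointer int
	draft   string // text typed before the user pressed Up
}

func (h *history) Add(s string) {
	h.entries = append(h.entries, s)
	h.pointer = len(h.entries)
}

func (h *history) Up(current string) string {
	if len(h.entries) == 0 {
		return current
	}
	if h.pointer == len(h.entries) {
		h.draft = current // save the in-progress line once
	}
	if h.pointer > 0 {
		h.pointer--
	}
	return h.entries[h.pointer]
}

func (h *history) Down(current string) string {
	if h.pointer == len(h.entries) {
		return current // already at the fresh line
	}
	h.pointer++
	if h.pointer == len(h.entries) {
		return h.draft // restore what was being typed
	}
	return h.entries[h.pointer]
}

func main() {
	h := &history{}
	h.Add("cid ...")
	h.Add("oid ...")
	fmt.Println(h.Up("dra")) // "oid ..."
	fmt.Println(h.Up(""))    // "cid ..."
	fmt.Println(h.Down(""))  // "oid ..."
	fmt.Println(h.Down(""))  // "dra"
}
```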
diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go
index a4d392ab3..5f53ed287 100644
--- a/cmd/frostfs-lens/internal/tui/records.go
+++ b/cmd/frostfs-lens/internal/tui/records.go
@@ -8,7 +8,6 @@ import (
 	"sync"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
@@ -63,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
 
 	ctx, v.onUnmount = context.WithCancel(ctx)
 
-	tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+	tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+	if err != nil {
+		return err
+	}
 
 	v.buffer = make(chan *Record, v.ui.loadBufferSize)
 	go func() {
@@ -71,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
 
 		for item := range tempBuffer {
 			if item.err != nil {
-				v.ui.stopOnError(item.err)
+				v.ui.stopOnError(err)
 				break
 			}
 			record := item.val
 
-			var err error
 			record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
 			if err != nil {
 				v.ui.stopOnError(err)
@@ -95,7 +96,9 @@ func (v *RecordsView) Mount(ctx context.Context) error {
 }
 
 func (v *RecordsView) Unmount() {
-	assert.False(v.onUnmount == nil, "try to unmount not mounted component")
+	if v.onUnmount == nil {
+		panic("try to unmount not mounted component")
+	}
 	v.onUnmount()
 	v.onUnmount = nil
 }
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
index cc6b7859e..bcc082821 100644
--- a/cmd/frostfs-lens/internal/tui/ui.go
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
 			return
 		}
 
-		switch v := ui.mountedPage.(type) {
+		switch ui.mountedPage.(type) {
 		case *BucketsView:
 			ui.moveNextPage(NewBucketsView(ui, res))
 		case *RecordsView:
-			bucket := v.bucket
+			bucket := ui.mountedPage.(*RecordsView).bucket
 			ui.moveNextPage(NewRecordsView(ui, bucket, res))
 		}
 
@@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
 		ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
 	}
 
-	ui.MouseHandler()
+	ui.Box.MouseHandler()
 }
 
 func (ui *UI) WithPrompt(prompt string) error {
diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go
index 513314712..de3aed660 100644
--- a/cmd/frostfs-node/apemanager.go
+++ b/cmd/frostfs-node/apemanager.go
@@ -14,12 +14,11 @@ import (
 func initAPEManagerService(c *cfg) {
 	contractStorage := ape_contract.NewProxyVerificationContractStorage(
 		morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
-		c.key,
+		c.shared.key,
 		c.cfgMorph.proxyScriptHash,
 		c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
 
 	execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
-		c.cfgMorph.client,
 		apemanager.WithLogger(c.log))
 	sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
 	auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go
index ce8ae9662..64c3beba7 100644
--- a/cmd/frostfs-node/attributes.go
+++ b/cmd/frostfs-node/attributes.go
@@ -6,5 +6,9 @@ import (
 )
 
 func parseAttributes(c *cfg) {
+	if nodeconfig.Relay(c.appCfg) {
+		return
+	}
+
 	fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg)))
 }
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index e5df0a22d..b90641799 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -1,30 +1,22 @@
 package main
 
 import (
-	"bytes"
-	"cmp"
-	"context"
-	"slices"
 	"sync"
-	"sync/atomic"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	lru "github.com/hashicorp/golang-lru/v2"
 	"github.com/hashicorp/golang-lru/v2/expirable"
-	"github.com/hashicorp/golang-lru/v2/simplelru"
-	"go.uber.org/zap"
 )
 
-type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
+type netValueReader[K any, V any] func(K) (V, error)
 
 type valueWithError[V any] struct {
 	v V
@@ -57,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
 // updates the value from the network on cache miss or by TTL.
 //
 // returned value should not be modified.
-func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
+func (c *ttlNetCache[K, V]) get(key K) (V, error) {
 	hit := false
 	startedAt := time.Now()
 	defer func() {
@@ -79,7 +71,7 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
 		return val.v, val.e
 	}
 
-	v, err := c.netRdr(ctx, key)
+	v, err := c.netRdr(key)
 
 	c.cache.Add(key, &valueWithError[V]{
 		v: v,
@@ -117,6 +109,55 @@ func (c *ttlNetCache[K, V]) remove(key K) {
 	hit = c.cache.Remove(key)
 }
 
+// entity that provides LRU cache interface.
+type lruNetCache struct {
+	cache *lru.Cache[uint64, *netmapSDK.NetMap]
+
+	netRdr netValueReader[uint64, *netmapSDK.NetMap]
+
+	metrics cacheMetrics
+}
+
+// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
+func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
+	cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
+	fatalOnErr(err)
+
+	return &lruNetCache{
+		cache:   cache,
+		netRdr:  netRdr,
+		metrics: metrics,
+	}
+}
+
+// reads value by the key.
+//
+// updates the value from the network on cache miss.
+//
+// returned value should not be modified.
+func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
+	hit := false
+	startedAt := time.Now()
+	defer func() {
+		c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
+	}()
+
+	val, ok := c.cache.Get(key)
+	if ok {
+		hit = true
+		return val, nil
+	}
+
+	val, err := c.netRdr(key)
+	if err != nil {
+		return nil, err
+	}
+
+	c.cache.Add(key, val)
+
+	return val, nil
+}
+
 // wrapper over TTL cache of values read from the network
 // that implements container storage.
 type ttlContainerStorage struct {
@@ -125,11 +166,11 @@
 }
 
 func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
-	lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
-		return v.Get(ctx, id)
+	lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
+		return v.Get(id)
 	}, metrics.NewCacheMetrics("container"))
-	lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
-		return v.DeletionInfo(ctx, id)
+	lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
+		return v.DeletionInfo(id)
 	}, metrics.NewCacheMetrics("container_deletion_info"))
 
 	return ttlContainerStorage{
@@ -147,245 +188,43 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
 
 // Get returns container value from the cache. If value is missing in the cache
 // or expired, then it returns value from side chain and updates the cache.
-func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
-	return s.containerCache.get(ctx, cnr)
+func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
+	return s.containerCache.get(cnr)
 }
 
-func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
-	return s.delInfoCache.get(ctx, cnr)
+func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
+	return s.delInfoCache.get(cnr)
 }
 
 type lruNetmapSource struct {
 	netState netmap.State
 
-	client     rawSource
-	cache      *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
-	mtx        sync.RWMutex
-	metrics    cacheMetrics
-	log        *logger.Logger
-	candidates atomic.Pointer[[]netmapSDK.NodeInfo]
+	cache *lruNetCache
 }
 
-type rawSource interface {
-	GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
-	GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
-}
-
-func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
-	netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
-) netmap.Source {
+func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
 	const netmapCacheSize = 10
 
-	cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
-	fatalOnErr(err)
+	lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
+		return v.GetNetMapByEpoch(key)
+	}, metrics.NewCacheMetrics("netmap"))
 
-	src := &lruNetmapSource{
-		netState: netState,
-		client:   client,
-		cache:    cache,
-		log:      log,
-		metrics:  metrics.NewCacheMetrics("netmap"),
-	}
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		src.updateCandidates(ctx, d)
-	}()
-
-	return src
-}
-
-// updateCandidates routine to merge netmap in cache with candidates list.
-func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
-	timer := time.NewTimer(d)
-	defer timer.Stop()
-
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-timer.C:
-			newCandidates, err := s.client.GetCandidates(ctx)
-			if err != nil {
-				s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
-				timer.Reset(d)
-				break
-			}
-			if len(newCandidates) == 0 {
-				s.candidates.Store(&newCandidates)
-				timer.Reset(d)
-				break
-			}
-			slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
-				return cmp.Compare(n1.Hash(), n2.Hash())
-			})
-
-			// Check once state changed
-			v := s.candidates.Load()
-			if v == nil {
-				s.candidates.Store(&newCandidates)
-				s.mergeCacheWithCandidates(newCandidates)
-				timer.Reset(d)
-				break
-			}
-			ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
-				if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
-					uint32(n1.Status()) != uint32(n2.Status()) ||
-					slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
-					return 1
-				}
-				ne1 := slices.Collect(n1.NetworkEndpoints())
-				ne2 := slices.Collect(n2.NetworkEndpoints())
-				return slices.Compare(ne1, ne2)
-			})
-			if ret != 0 {
-				s.candidates.Store(&newCandidates)
-				s.mergeCacheWithCandidates(newCandidates)
-			}
-			timer.Reset(d)
-		}
+	return &lruNetmapSource{
+		netState: s,
+		cache:    lruNetmapCache,
 	}
 }
 
-func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
-	s.mtx.Lock()
-	tmp := s.cache.Values()
-	s.mtx.Unlock()
-	for _, pointer := range tmp {
-		nm := pointer.Load()
-		updates := getNetMapNodesToUpdate(nm, candidates)
-		if len(updates) > 0 {
-			nm = nm.Clone()
-			mergeNetmapWithCandidates(updates, nm)
-			pointer.Store(nm)
-		}
-	}
+func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
+	return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
 }
 
-// reads value by the key.
-//
-// updates the value from the network on cache miss.
-//
-// returned value should not be modified.
-func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
-	hit := false
-	startedAt := time.Now()
-	defer func() {
-		s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
-	}()
-
-	s.mtx.RLock()
-	val, ok := s.cache.Get(key)
-	s.mtx.RUnlock()
-	if ok {
-		hit = true
-		return val.Load(), nil
-	}
-
-	s.mtx.Lock()
-	defer s.mtx.Unlock()
-
-	val, ok = s.cache.Get(key)
-	if ok {
-		hit = true
-		return val.Load(), nil
-	}
-
-	nm, err := s.client.GetNetMapByEpoch(ctx, key)
-	if err != nil {
-		return nil, err
-	}
-	v := s.candidates.Load()
-	if v != nil {
-		updates := getNetMapNodesToUpdate(nm, *v)
-		if len(updates) > 0 {
-			mergeNetmapWithCandidates(updates, nm)
-		}
-	}
-
-	p := atomic.Pointer[netmapSDK.NetMap]{}
-	p.Store(nm)
-	s.cache.Add(key, &p)
-
-	return nm, nil
+func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+	return s.getNetMapByEpoch(epoch)
 }
 
-// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
-func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
-	for _, v := range updates {
-		if v.status != netmapSDK.UnspecifiedState {
-			nm.Nodes()[v.netmapIndex].SetStatus(v.status)
-		}
-		if v.externalAddresses != nil {
-			nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
-		}
-		if v.endpoints != nil {
-			nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
-		}
-	}
-}
-
-type nodeToUpdate struct {
-	netmapIndex       int
-	status            netmapSDK.NodeState
-	externalAddresses []string
-	endpoints         []string
-}
-
-// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
-func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
-	var res []nodeToUpdate
-	for i := range nm.Nodes() {
-		for _, cnd := range candidates {
-			if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
-				var tmp nodeToUpdate
-				var update bool
-
-				if cnd.Status() != nm.Nodes()[i].Status() &&
-					(cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
-					update = true
-					tmp.status = cnd.Status()
-				}
-
-				externalAddresses := cnd.ExternalAddresses()
-				if externalAddresses != nil &&
-					slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
-					update = true
-					tmp.externalAddresses = externalAddresses
-				}
-
-				nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
-				nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints())
-				candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
-				candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints())
-				if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
-					update = true
-					tmp.endpoints = candidateEndpoints
-				}
-
-				if update {
-					tmp.netmapIndex = i
-					res = append(res, tmp)
-				}
-
-				break
-			}
-		}
-	}
-	return res
-}
-
-func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
-	return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
-}
-
-func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
-	return s.getNetMapByEpoch(ctx, epoch)
-}
-
-func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
-	val, err := s.get(ctx, epoch)
+func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
+	val, err := s.cache.get(epoch)
 	if err != nil {
 		return nil, err
 	}
@@ -393,7 +232,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
 	return val, nil
 }
 
-func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) {
+func (s *lruNetmapSource) Epoch() (uint64, error) {
 	return s.netState.CurrentEpoch(), nil
 }
 
@@ -401,10 +240,7 @@ type cachedIRFetcher struct {
 	*ttlNetCache[struct{}, [][]byte]
 }
 
-func newCachedIRFetcher(f interface {
-	InnerRingKeys(ctx context.Context) ([][]byte, error)
-},
-) cachedIRFetcher {
+func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
 	const (
 		irFetcherCacheSize = 1 // we intend to store only one value
 
@@ -418,8 +254,8 @@ func newCachedIRFetcher(f interface {
 	)
 
 	irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
-		func(ctx context.Context, _ struct{}) ([][]byte, error) {
-			return f.InnerRingKeys(ctx)
+		func(_ struct{}) ([][]byte, error) {
+			return f.InnerRingKeys()
 		}, metrics.NewCacheMetrics("ir_keys"),
 	)
 
@@ -429,8 +265,8 @@ func newCachedIRFetcher(f interface {
 // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
 // the cache or expired, then it returns keys from side chain and updates
 // the cache.
-func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) {
-	val, err := f.get(ctx, struct{}{})
+func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
+	val, err := f.get(struct{}{})
 	if err != nil {
 		return nil, err
 	}
@@ -453,7 +289,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
 	}
 }
 
-func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
+func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
 	const ttl = time.Second * 30
 
 	hit := false
@@ -475,7 +311,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
 	c.mtx.Lock()
 	size = c.lastSize
 	if !c.lastUpdated.After(prevUpdated) {
-		size = c.src.MaxObjectSize(ctx)
+		size = c.src.MaxObjectSize()
 		c.lastSize = size
 		c.lastUpdated = time.Now()
 	}
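Aside: the ttlNetCache these hunks keep rewiring wraps hashicorp's expirable LRU and caches errors alongside values, so a failing backend is not re-queried on every read until the TTL lapses. A self-contained sketch of that wrapper shape (names mirror the diff; metrics and frostfs types are omitted):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

type netValueReader[K any, V any] func(K) (V, error)

// valueWithError caches negative results too: a cached error is returned
// until the entry expires, shielding the network source from retry storms.
type valueWithError[V any] struct {
	v V
	e error
}

type ttlNetCache[K comparable, V any] struct {
	cache  *expirable.LRU[K, *valueWithError[V]]
	netRdr netValueReader[K, V]
}

func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V]) *ttlNetCache[K, V] {
	return &ttlNetCache[K, V]{
		cache:  expirable.NewLRU[K, *valueWithError[V]](sz, nil, ttl),
		netRdr: netRdr,
	}
}

func (c *ttlNetCache[K, V]) get(key K) (V, error) {
	if val, ok := c.cache.Get(key); ok {
		return val.v, val.e // hit, possibly a cached error
	}
	v, err := c.netRdr(key)
	c.cache.Add(key, &valueWithError[V]{v: v, e: err})
	return v, err
}

func main() {
	calls := 0
	c := newNetworkTTLCache[string, int](8, time.Second, func(k string) (int, error) {
		calls++
		return len(k), nil
	})
	a, _ := c.get("epoch")
	b, _ := c.get("epoch")   // served from cache
	fmt.Println(a, b, calls) // 5 5 1
}
```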
diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go
index 24286826f..f8c324a2f 100644
--- a/cmd/frostfs-node/cache_test.go
+++ b/cmd/frostfs-node/cache_test.go
@@ -1,13 +1,10 @@
 package main
 
 import (
-	"context"
 	"errors"
-	"sync"
 	"testing"
 	"time"
 
-	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"github.com/stretchr/testify/require"
 )
 
@@ -20,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
 	t.Run("Test Add and Get", func(t *testing.T) {
 		ti := time.Now()
 		cache.set(key, ti, nil)
-		val, err := cache.get(context.Background(), key)
+		val, err := cache.get(key)
 		require.NoError(t, err)
 		require.Equal(t, ti, val)
 	})
@@ -29,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
 		ti := time.Now()
 		cache.set(key, ti, nil)
 		time.Sleep(2 * ttlDuration)
-		val, err := cache.get(context.Background(), key)
+		val, err := cache.get(key)
 		require.NoError(t, err)
 		require.NotEqual(t, val, ti)
 	})
@@ -38,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
 		ti := time.Now()
 		cache.set(key, ti, nil)
 		cache.remove(key)
-		val, err := cache.get(context.Background(), key)
+		val, err := cache.get(key)
 		require.NoError(t, err)
 		require.NotEqual(t, val, ti)
 	})
 
 	t.Run("Test Cache Error", func(t *testing.T) {
 		cache.set("error", time.Now(), errors.New("mock error"))
-		_, err := cache.get(context.Background(), "error")
+		_, err := cache.get("error")
 		require.Error(t, err)
 		require.Equal(t, "mock error", err.Error())
 	})
 }
 
-func testNetValueReader(_ context.Context, key string) (time.Time, error) {
+func testNetValueReader(key string) (time.Time, error) {
 	if key == "error" {
 		return time.Now(), errors.New("mock error")
 	}
@@ -61,75 +58,3 @@
 type noopCacheMetricts struct{}
 
 func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
-
-type rawSrc struct{}
-
-func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
-	node0 := netmapSDK.NodeInfo{}
-	node0.SetPublicKey([]byte{byte(1)})
-	node0.SetStatus(netmapSDK.Online)
-	node0.SetExternalAddresses("1", "0")
-	node0.SetNetworkEndpoints("1", "0")
-
-	node1 := netmapSDK.NodeInfo{}
-	node1.SetPublicKey([]byte{byte(1)})
-	node1.SetStatus(netmapSDK.Online)
-	node1.SetExternalAddresses("1", "0")
-	node1.SetNetworkEndpoints("1", "0")
-
-	return []netmapSDK.NodeInfo{node0, node1}, nil
-}
-
-func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
-	nm := netmapSDK.NetMap{}
-	nm.SetEpoch(1)
-
-	node0 := netmapSDK.NodeInfo{}
-	node0.SetPublicKey([]byte{byte(1)})
-	node0.SetStatus(netmapSDK.Maintenance)
-	node0.SetExternalAddresses("0")
-	node0.SetNetworkEndpoints("0")
-
-	node1 := netmapSDK.NodeInfo{}
-	node1.SetPublicKey([]byte{byte(1)})
-	node1.SetStatus(netmapSDK.Maintenance)
-	node1.SetExternalAddresses("0")
-	node1.SetNetworkEndpoints("0")
-
-	nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
-
-	return &nm, nil
-}
-
-type st struct{}
-
-func (s *st) CurrentEpoch() uint64 {
-	return 1
-}
-
-func TestNetmapStorage(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	wg := sync.WaitGroup{}
-	cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
-
-	nm, err := cache.GetNetMapByEpoch(ctx, 1)
-	require.NoError(t, err)
-	require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
-	require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
-	require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
-
-	require.Eventually(t, func() bool {
-		nm, err := cache.GetNetMapByEpoch(ctx, 1)
-		require.NoError(t, err)
-		for _, node := range nm.Nodes() {
-			if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
-				node.NumberOfNetworkEndpoints() == 2) {
-				return false
-			}
-		}
-		return true
-	}, time.Second*5, time.Millisecond*10)
-
-	cancel()
-	wg.Wait()
-}
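Aside: the deleted TestNetmapStorage polled for the background merge with require.Eventually instead of sleeping a fixed amount. The same idiom reduced to a toy, self-contained example:

```go
package main

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestEventuallyPattern asserts that a background goroutine reaches the
// expected state within a deadline, checking at a fixed tick — the polling
// idiom the removed test relied on.
func TestEventuallyPattern(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(20 * time.Millisecond) // stand-in for async work
		ready.Store(true)
	}()

	require.Eventually(t, func() bool {
		return ready.Load()
	}, time.Second, 10*time.Millisecond)
}
```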
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 96274e625..18d3e2454 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -30,18 +30,15 @@ import (
 	objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
 	replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
 	tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
-	treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
 	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
 	netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -72,7 +69,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
 	netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -110,8 +106,6 @@ type applicationConfiguration struct {
 		level       string
 		destination string
 		timestamp   bool
-		options     []zap.Option
-		tags        [][]string
 	}
 
 	ObjectCfg struct {
@@ -121,6 +115,7 @@ type applicationConfiguration struct {
 
 	EngineCfg struct {
 		errorThreshold uint32
+		shardPoolSize  uint32
 		shards         []shardCfg
 		lowMem         bool
 	}
@@ -130,13 +125,15 @@ type applicationConfiguration struct {
 }
 
 type shardCfg struct {
-	compression compression.Config
+	compress                         bool
+	estimateCompressibility          bool
+	estimateCompressibilityThreshold float64
 
 	smallSizeObjectLimit uint64
 
+	uncompressableContentType []string
+
 	refillMetabase             bool
 	refillMetabaseWorkersCount int
 	mode                       shardmode.Mode
 
-	limiter qos.Limiter
-
 	metaCfg struct {
 		path string
@@ -233,71 +230,62 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	a.LoggerCfg.level = loggerconfig.Level(c)
 	a.LoggerCfg.destination = loggerconfig.Destination(c)
 	a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
-	var opts []zap.Option
-	if loggerconfig.ToLokiConfig(c).Enabled {
-		opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
-			lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
-			return lokiCore
-		})}
-	}
-	a.LoggerCfg.options = opts
-	a.LoggerCfg.tags = loggerconfig.Tags(c)
 
 	// Object
 
 	a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
-	locodeDBPath := nodeconfig.LocodeDBPath(c)
-	parser, err := placement.NewMetricsParser(locodeDBPath)
-	if err != nil {
-		return fmt.Errorf("metrics parser creation: %w", err)
+	var pm []placement.Metric
+	for _, raw := range objectconfig.Get(c).Priority() {
+		m, err := placement.ParseMetric(raw)
+		if err != nil {
+			return err
+		}
+		pm = append(pm, m)
 	}
-	m, err := parser.ParseMetrics(objectconfig.Get(c).Priority())
-	if err != nil {
-		return fmt.Errorf("parse metrics: %w", err)
-	}
-	a.ObjectCfg.priorityMetrics = m
+	a.ObjectCfg.priorityMetrics = pm
 
 	// Storage Engine
 
 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
+	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
 	a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
 
 	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
 }
 
-func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
-	var target shardCfg
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
+	var newConfig shardCfg
 
-	target.refillMetabase = source.RefillMetabase()
-	target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
-	target.mode = source.Mode()
-	target.compression = source.Compression()
-	target.smallSizeObjectLimit = source.SmallSizeLimit()
+	newConfig.refillMetabase = oldConfig.RefillMetabase()
+	newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
+	newConfig.mode = oldConfig.Mode()
+	newConfig.compress = oldConfig.Compress()
+	newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
+	newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
+	newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
+	newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
 
-	a.setShardWriteCacheConfig(&target, source)
+	a.setShardWriteCacheConfig(&newConfig, oldConfig)
 
-	a.setShardPiloramaConfig(c, &target, source)
+	a.setShardPiloramaConfig(c, &newConfig, oldConfig)
 
-	if err := a.setShardStorageConfig(&target, source); err != nil {
+	if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
 		return err
 	}
 
-	a.setMetabaseConfig(&target, source)
+	a.setMetabaseConfig(&newConfig, oldConfig)
 
-	a.setGCConfig(&target, source)
-	if err := a.setLimiter(&target, source); err != nil {
-		return err
-	}
+	a.setGCConfig(&newConfig, oldConfig)
 
-	a.EngineCfg.shards = append(a.EngineCfg.shards, target)
+	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
 
 	return nil
 }
 
-func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
-	writeCacheCfg := source.WriteCache()
+func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	writeCacheCfg := oldConfig.WriteCache()
 	if writeCacheCfg.Enabled() {
-		wc := &target.writecacheCfg
+		wc := &newConfig.writecacheCfg
 
 		wc.enabled = true
 		wc.path = writeCacheCfg.Path()
@@ -310,10 +298,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
 	}
 }
 
-func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
 	if config.BoolSafe(c.Sub("tree"), "enabled") {
-		piloramaCfg := source.Pilorama()
-		pr := &target.piloramaCfg
+		piloramaCfg := oldConfig.Pilorama()
+		pr := &newConfig.piloramaCfg
 
 		pr.enabled = true
 		pr.path = piloramaCfg.Path()
@@ -324,8 +312,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
 	}
 }
 
-func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
-	blobStorCfg := source.BlobStor()
+func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+	blobStorCfg := oldConfig.BlobStor()
 	storagesCfg := blobStorCfg.Storages()
 
 	ss := make([]subStorageCfg, 0, len(storagesCfg))
@@ -359,13 +347,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
 		ss = append(ss, sCfg)
 	}
 
-	target.subStorages = ss
+	newConfig.subStorages = ss
 	return nil
 }
 
-func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
-	metabaseCfg := source.Metabase()
-	m := &target.metaCfg
+func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	metabaseCfg := oldConfig.Metabase()
+	m := &newConfig.metaCfg
 
 	m.path = metabaseCfg.Path()
 	m.perm = metabaseCfg.BoltDB().Perm()
@@ -373,22 +361,12 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon
 	m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
 }
 
-func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
-	gcCfg := source.GC()
-	target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
-	target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
-	target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-	target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
-}
-
-func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
-	limitsConfig := source.Limits().ToConfig()
-	limiter, err := qos.NewLimiter(limitsConfig)
-	if err != nil {
-		return err
-	}
-	target.limiter = limiter
-	return nil
+func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	gcCfg := oldConfig.GC()
+	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
 }
 
 // internals contains application-specific internals that are created
@@ -478,6 +456,7 @@ type shared struct {
 // dynamicConfiguration stores parameters of the
 // components that supports runtime reconfigurations.
 type dynamicConfiguration struct {
+	logger  *logger.Prm
 	pprof   *httpComponent
 	metrics *httpComponent
 }
@@ -514,7 +493,6 @@ type cfg struct {
 	cfgNetmap         cfgNetmap
 	cfgControlService cfgControlService
 	cfgObject         cfgObject
-	cfgQoSService     cfgQoSService
 }
 
 // ReadCurrentNetMap reads network map which has been cached at the
@@ -549,8 +527,6 @@ type cfgGRPC struct {
 	maxChunkSize     uint64
 	maxAddrAmount    uint64
 	reconnectTimeout time.Duration
-
-	limiter atomic.Pointer[limiting.SemaphoreLimiter]
 }
 
 func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@@ -651,6 +627,7 @@ type cfgNetmap struct {
 
 	state *networkState
 
+	needBootstrap       bool
 	reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
 }
 
@@ -686,6 +663,10 @@ type cfgAccessPolicyEngine struct {
 }
 
 type cfgObjectRoutines struct {
+	putRemote *ants.Pool
+
+	putLocal *ants.Pool
+
 	replication *ants.Pool
 }
 
@@ -709,9 +690,11 @@ func initCfg(appCfg *config.Config) *cfg {
 
 	key := nodeconfig.Key(appCfg)
 
+	relayOnly := nodeconfig.Relay(appCfg)
+
 	netState := newNetworkState()
 
-	c.shared = initShared(appCfg, key, netState)
+	c.shared = initShared(appCfg, key, netState, relayOnly)
 
 	netState.metrics = c.metricsCollector
 
@@ -720,7 +703,12 @@ func initCfg(appCfg *config.Config) *cfg {
 	logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
 	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
-	logger.UpdateLevelForTags(logPrm)
+	if loggerconfig.ToLokiConfig(appCfg).Enabled {
+		log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+			lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
+			return lokiCore
+		}))
+	}
 
 	c.internals = initInternals(appCfg, log)
 
@@ -731,7 +719,7 @@ func initCfg(appCfg *config.Config) *cfg {
 
 	c.cfgFrostfsID = initFrostfsID(appCfg)
 
-	c.cfgNetmap = initNetmap(appCfg, netState)
+	c.cfgNetmap = initNetmap(appCfg, netState, relayOnly)
 
 	c.cfgGRPC = initCfgGRPC()
 
@@ -777,8 +765,12 @@ func initSdNotify(appCfg *config.Config) bool {
 	return false
 }
 
-func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared {
-	netAddr := nodeconfig.BootstrapAddresses(appCfg)
+func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared {
+	var netAddr network.AddressGroup
+
+	if !relayOnly {
+		netAddr = nodeconfig.BootstrapAddresses(appCfg)
+	}
 
 	persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
 	fatalOnErr(err)
@@ -829,15 +821,18 @@ func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) interna
 	return result
 }
 
-func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap {
+func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
 	netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
 	fatalOnErr(err)
 
+	var reBootstrapTurnedOff atomic.Bool
+	reBootstrapTurnedOff.Store(relayOnly)
 	return cfgNetmap{
 		scriptHash: contractsconfig.Netmap(appCfg),
 		state:      netState,
 		workerPool: netmapWorkerPool,
-		reBoostrapTurnedOff: &atomic.Bool{},
+		needBootstrap:       !relayOnly,
+		reBoostrapTurnedOff: &reBootstrapTurnedOff,
 	}
 }
 
@@ -857,14 +852,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
 	}
 }
 
-func initCfgGRPC() (cfg cfgGRPC) {
-	maxChunkSize := uint64(maxMsgSize) * 3 / 4  // 25% to meta, 75% to payload
-	maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
+func initCfgGRPC() cfgGRPC {
+	maxChunkSize := uint64(maxMsgSize) * 3 / 4          // 25% to meta, 75% to payload
+	maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
 
-	cfg.maxChunkSize = maxChunkSize
-	cfg.maxAddrAmount = maxAddrAmount
-
-	return
+	return cfgGRPC{
+		maxChunkSize:  maxChunkSize,
+		maxAddrAmount: maxAddrAmount,
+	}
 }
 
 func initCfgObject(appCfg *config.Config) cfgObject {
@@ -881,8 +876,9 @@ func (c *cfg) engineOpts() []engine.Option {
 	var opts []engine.Option
 
 	opts = append(opts,
+		engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
 		engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
-		engine.WithLogger(c.log.WithTag(logger.TagEngine)),
+		engine.WithLogger(c.log),
 		engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
 	)
 
@@ -919,8 +915,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
 			writecache.WithMaxCacheSize(wcRead.sizeLimit),
 			writecache.WithMaxCacheCount(wcRead.countLimit),
 			writecache.WithNoSync(wcRead.noSync),
-			writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)),
-			writecache.WithQoSLimiter(shCfg.limiter),
+			writecache.WithLogger(c.log),
 		)
 	}
 	return writeCacheOpts
@@ -959,8 +954,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
 			blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval),
 			blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
 			blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
-			blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)),
-			blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)),
+			blobovniczatree.WithLogger(c.log),
 			blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),
 		}
 
@@ -983,7 +977,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
 			fstree.WithPerm(sRead.perm),
 			fstree.WithDepth(sRead.depth),
 			fstree.WithNoSync(sRead.noSync),
-			fstree.WithLogger(c.log.WithTag(logger.TagFSTree)),
+			fstree.WithLogger(c.log),
 		}
 		if c.metricsCollector != nil {
 			fstreeOpts = append(fstreeOpts,
@@ -1013,9 +1007,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 	ss := c.getSubstorageOpts(ctx, shCfg)
 
 	blobstoreOpts := []blobstor.Option{
-		blobstor.WithCompression(shCfg.compression),
+		blobstor.WithCompressObjects(shCfg.compress),
+		blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
+		blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility),
+		blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold),
 		blobstor.WithStorages(ss),
-		blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)),
+		blobstor.WithLogger(c.log),
 	}
 	if c.metricsCollector != nil {
 		blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore())))
@@ -1034,13 +1031,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 	}
 	if c.metricsCollector != nil {
 		mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
-		shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
 	}
 
 	var sh shardOptsWithID
 	sh.configID = shCfg.id()
 	sh.shOpts = []shard.Option{
-		shard.WithLogger(c.log.WithTag(logger.TagShard)),
+		shard.WithLogger(c.log),
 		shard.WithRefillMetabase(shCfg.refillMetabase),
 		shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
 		shard.WithMode(shCfg.mode),
@@ -1059,33 +1055,30 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 			return pool
 		}),
-		shard.WithLimiter(shCfg.limiter),
 	}
 	return sh
 }
 
-func (c *cfg) loggerPrm() (logger.Prm, error) {
-	var prm logger.Prm
-	// (re)init read configuration
-	err := prm.SetLevelString(c.LoggerCfg.level)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
-	}
-	err = prm.SetDestination(c.LoggerCfg.destination)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
-	}
-	prm.PrependTimestamp = c.LoggerCfg.timestamp
-	prm.Options = c.LoggerCfg.options
-	err = prm.SetTags(c.LoggerCfg.tags)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
+func (c *cfg) loggerPrm() (*logger.Prm, error) {
+	// check if it has been inited before
+	if c.dynamicConfiguration.logger == nil {
+		c.dynamicConfiguration.logger = new(logger.Prm)
 	}
 
-	return prm, nil
+	// (re)init read configuration
+	err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic("incorrect log level format: " + c.LoggerCfg.level)
+	}
+	err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic("incorrect log destination format: " + c.LoggerCfg.destination)
+	}
+	c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
+
+	return c.dynamicConfiguration.logger, nil
 }
 
 func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1154,7 +1147,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
 		c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
 
 	cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
-	if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
+	if cacheSize > 0 {
 		morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
 	}
 
@@ -1173,7 +1166,21 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
 func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
 	var err error
 
+	optNonBlocking := ants.WithNonblocking(true)
+
+	putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
+	pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
+	fatalOnErr(err)
+
+	putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
+	pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
+	fatalOnErr(err)
+
 	replicatorPoolSize := replicatorconfig.PoolSize(cfg)
+	if replicatorPoolSize <= 0 {
+		replicatorPoolSize = putRemoteCapacity
+	}
+
 	pool.replication, err = ants.NewPool(replicatorPoolSize)
 	fatalOnErr(err)
@@ -1199,7 +1206,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
 }
 
 func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
-	ni, err := c.netmapLocalNodeState(ctx, epoch)
+	ni, err := c.netmapLocalNodeState(epoch)
 	if err != nil {
 		c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 			zap.Uint64("epoch", epoch),
@@ -1246,6 +1253,11 @@ func (c *cfg) bootstrap(ctx context.Context) error {
 	return bootstrapOnline(ctx, c)
 }
 
+// needBootstrap checks if local node should be registered in network on bootup.
+func (c *cfg) needBootstrap() bool {
+	return c.cfgNetmap.needBootstrap
+}
+
 type dCmp struct {
 	name       string
 	reloadFunc func() error
@@ -1320,7 +1332,15 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	// all the components are expected to support
 	// Logger's dynamic reconfiguration approach
 
-	components := c.getComponents(ctx)
+	// Logger
+
+	logPrm, err := c.loggerPrm()
+	if err != nil {
+		c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
+		return
+	}
+
+	components := c.getComponents(ctx, logPrm)
 
 	// Object
 	c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@@ -1358,17 +1378,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
-func (c *cfg) getComponents(ctx context.Context) []dCmp {
+func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 	var components []dCmp
 
-	components = append(components, dCmp{"logger", func() error {
-		prm, err := c.loggerPrm()
-		if err != nil {
-			return err
-		}
-		logger.UpdateLevelForTags(prm)
-		return nil
-	}})
+	components = append(components, dCmp{"logger", logPrm.Reload})
 	components = append(components, dCmp{"runtime", func() error {
 		setRuntimeParameters(ctx, c)
 		return nil
@@ -1389,12 +1402,6 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
 		}
 		return err
 	}})
-	if c.treeService != nil {
-		components = append(components, dCmp{"tree", func() error {
-			c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys())
-			return nil
-		}})
-	}
 	if cmp, updated := metricsComponent(c); updated {
 		if cmp.enabled {
 			cmp.preReload = enableMetricsSvc
@@ -1407,13 +1414,17 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
 		components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
 	}
 
-	components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
-
 	return components
 }
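Aside: getComponents builds a list of dCmp{name, reloadFunc} pairs that reloadConfig walks on SIGHUP. The registry pattern in isolation — a hypothetical miniature, not the node's actual reload machinery:

```go
package main

import "fmt"

// component mirrors the dCmp shape: a name plus a closure
// that re-applies that component's configuration.
type component struct {
	name   string
	reload func() error
}

// reloadAll applies every registered reload function and reports failures
// per component instead of aborting on the first error.
func reloadAll(components []component) {
	for _, c := range components {
		if err := c.reload(); err != nil {
			fmt.Printf("reload %s: %v\n", c.name, err)
			continue
		}
		fmt.Printf("reloaded %s\n", c.name)
	}
}

func main() {
	level := "info"
	components := []component{
		{"logger", func() error { level = "debug"; return nil }},
		{"pprof", func() error { return nil }},
	}
	reloadAll(components)
	fmt.Println("log level:", level)
}
```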
components } func (c *cfg) reloadPools() error { - newSize := replicatorconfig.PoolSize(c.appCfg) + newSize := objectconfig.Put(c.appCfg).PoolSizeLocal() + c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size") + + newSize = objectconfig.Put(c.appCfg).PoolSizeRemote() + c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size") + + newSize = replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") return nil diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go index c40bf3620..36e53ea7c 100644 --- a/cmd/frostfs-node/config/calls.go +++ b/cmd/frostfs-node/config/calls.go @@ -1,7 +1,6 @@ package config import ( - "slices" "strings" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" @@ -53,5 +52,6 @@ func (x *Config) Value(name string) any { // It supports only one level of nesting and is intended to be used // to provide default values. func (x *Config) SetDefault(from *Config) { - x.defaultPath = slices.Clone(from.path) + x.defaultPath = make([]string, len(from.path)) + copy(x.defaultPath, from.path) } diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go index ee9d4268b..35dae97d9 100644 --- a/cmd/frostfs-node/config/configdir_test.go +++ b/cmd/frostfs-node/config/configdir_test.go @@ -12,10 +12,13 @@ import ( func TestConfigDir(t *testing.T) { dir := t.TempDir() - cfgFileName := path.Join(dir, "cfg_01.yml") + cfgFileName0 := path.Join(dir, "cfg_00.json") + cfgFileName1 := path.Join(dir, "cfg_01.yml") - require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) + require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777)) + require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777)) c := New("", dir, "") require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) + require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size"))) } diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go index 7994e7809..e5735e88b 100644 --- a/cmd/frostfs-node/config/engine/config.go +++ b/cmd/frostfs-node/config/engine/config.go @@ -11,6 +11,10 @@ import ( const ( subsection = "storage" + + // ShardPoolSizeDefault is a default value of routine pool size per-shard to + // process object PUT operations in a storage engine. + ShardPoolSizeDefault = 20 ) // ErrNoShardConfigured is returned when at least 1 shard is required but none are found. @@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) return nil } +// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section. +// +// Returns ShardPoolSizeDefault if the value is not a positive number. +func ShardPoolSize(c *config.Config) uint32 { + v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size") + if v > 0 { + return v + } + + return ShardPoolSizeDefault +} + // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // // Returns 0 if the value is missing.
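For reference, a minimal sketch of the YAML shape the restored setting reads — the "storage" section, the key, and the fallback of 20 are taken from the ShardPoolSize hunk above, and 15 is the value exercised by configdir_test.go and config_test.go:

storage:
  shard_pool_size: 15  # missing or non-positive values fall back to ShardPoolSizeDefault (20)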
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index 401c54edc..ef6380a62 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -14,8 +14,6 @@ import ( piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) @@ -55,6 +53,7 @@ func TestEngineSection(t *testing.T) { require.False(t, handlerCalled) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) + require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty)) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) }) @@ -64,6 +63,7 @@ func TestEngineSection(t *testing.T) { num := 0 require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) + require.EqualValues(t, 15, engineconfig.ShardPoolSize(c)) err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { defer func() { @@ -76,7 +76,6 @@ func TestEngineSection(t *testing.T) { ss := blob.Storages() pl := sc.Pilorama() gc := sc.GC() - limits := sc.Limits() switch num { case 0: @@ -101,11 +100,10 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, true, sc.Compression().Enabled) - require.Equal(t, compression.LevelFastest, sc.Compression().Level) - require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) - require.Equal(t, true, sc.Compression().EstimateCompressibility) - require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) + require.Equal(t, true, sc.Compress()) + require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) + require.Equal(t, true, sc.EstimateCompressibility()) + require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -136,86 +134,6 @@ func TestEngineSection(t *testing.T) { require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) - - readLimits := limits.ToConfig().Read - writeLimits := limits.ToConfig().Write - require.Equal(t, 30*time.Second, readLimits.IdleTimeout) - require.Equal(t, int64(10_000), readLimits.MaxRunningOps) - require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) - require.Equal(t, 45*time.Second, writeLimits.IdleTimeout) - require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) - require.Equal(t, int64(100), writeLimits.MaxWaitingOps) - require.ElementsMatch(t, readLimits.Tags, - []qos.IOTagConfig{ - { - Tag: "internal", - Weight: toPtr(20), - ReservedOps: toPtr(1000), - LimitOps: toPtr(0), - }, - { - Tag: "client", - Weight: toPtr(70), - ReservedOps: toPtr(10000), - }, - { - Tag: "background", - Weight: toPtr(5), - LimitOps: toPtr(10000), - ReservedOps: toPtr(0), - }, - { - Tag: "writecache", - Weight: toPtr(5), - LimitOps: toPtr(25000), - }, 
- { - Tag: "policer", - Weight: toPtr(5), - LimitOps: toPtr(25000), - Prohibited: true, - }, - { - Tag: "treesync", - Weight: toPtr(5), - LimitOps: toPtr(25), - }, - }) - require.ElementsMatch(t, writeLimits.Tags, - []qos.IOTagConfig{ - { - Tag: "internal", - Weight: toPtr(200), - ReservedOps: toPtr(100), - LimitOps: toPtr(0), - }, - { - Tag: "client", - Weight: toPtr(700), - ReservedOps: toPtr(1000), - }, - { - Tag: "background", - Weight: toPtr(50), - LimitOps: toPtr(1000), - ReservedOps: toPtr(0), - }, - { - Tag: "writecache", - Weight: toPtr(50), - LimitOps: toPtr(2500), - }, - { - Tag: "policer", - Weight: toPtr(50), - LimitOps: toPtr(2500), - }, - { - Tag: "treesync", - Weight: toPtr(50), - LimitOps: toPtr(100), - }, - }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, fs.FileMode(0o644), pl.Perm()) @@ -238,9 +156,8 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, false, sc.Compression().Enabled) - require.Equal(t, compression.LevelDefault, sc.Compression().Level) - require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) + require.Equal(t, false, sc.Compress()) + require.Equal(t, []string(nil), sc.UncompressableContentTypes()) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -271,17 +188,6 @@ func TestEngineSection(t *testing.T) { require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) - - readLimits := limits.ToConfig().Read - writeLimits := limits.ToConfig().Write - require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout) - require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps) - require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps) - require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout) - require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps) - require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps) - require.Equal(t, 0, len(readLimits.Tags)) - require.Equal(t, 0, len(writeLimits.Tags)) } return nil }) @@ -295,7 +201,3 @@ func TestEngineSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) } - -func toPtr(v float64) *float64 { - return &v -} diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go index b564d36f8..a51308b5b 100644 --- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go +++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go @@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - return max(d, 0) + if d < 0 { + d = 0 + } + return d } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - return max(s, 0) + if s < 0 { + s = 0 + } + return s } // NoSync returns the value of "no_sync" config parameter. @@ -60,5 +66,8 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. 
func (x *Config) PageSize() int { s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) - return max(s, 0) + if s < 0 { + s = 0 + } + return s } diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index d42646da7..0620c9f63 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -4,11 +4,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" - limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -28,27 +26,42 @@ func From(c *config.Config) *Config { return (*Config)(c) } -func (x *Config) Compression() compression.Config { - cc := (*config.Config)(x).Sub("compression") - if cc == nil { - return compression.Config{} - } - return compression.Config{ - Enabled: config.BoolSafe(cc, "enabled"), - UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), - Level: compression.Level(config.StringSafe(cc, "level")), - EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), - EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), - } +// Compress returns the value of "compress" config parameter. +// +// Returns false if the value is not a valid bool. +func (x *Config) Compress() bool { + return config.BoolSafe( + (*config.Config)(x), + "compress", + ) +} + +// UncompressableContentTypes returns the value of "compression_exclude_content_types" config parameter. +// +// Returns nil if the value is missing or invalid. +func (x *Config) UncompressableContentTypes() []string { + return config.StringSliceSafe( + (*config.Config)(x), + "compression_exclude_content_types") +} + +// EstimateCompressibility returns the value of "compression_estimate_compressibility" config parameter. +// +// Returns false if the value is not a valid bool. +func (x *Config) EstimateCompressibility() bool { + return config.BoolSafe( + (*config.Config)(x), + "compression_estimate_compressibility", + ) } // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. // // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not a valid float, or not in range [0.0; 1.0]. -func estimateCompressibilityThreshold(c *config.Config) float64 { +func (x *Config) EstimateCompressibilityThreshold() float64 { + v := config.FloatOrDefault( - c, - "estimate_compressibility_threshold", + (*config.Config)(x), + "compression_estimate_compressibility_threshold", EstimateCompressibilityThresholdDefault) if v < 0.0 || v > 1.0 { return EstimateCompressibilityThresholdDefault @@ -112,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config { ) } -// Limits returns "limits" subsection as a limitsconfig.Config.
-func (x *Config) Limits() *limitsconfig.Config { - return limitsconfig.From( - (*config.Config)(x). - Sub("limits"), - ) -} - // RefillMetabase returns the value of "resync_metabase" config parameter. // // Returns false if the value is not a valid bool. diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go deleted file mode 100644 index ccd1e0000..000000000 --- a/cmd/frostfs-node/config/engine/shard/limits/config.go +++ /dev/null @@ -1,112 +0,0 @@ -package limits - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "github.com/spf13/cast" -) - -// From wraps config section into Config. -func From(c *config.Config) *Config { - return (*Config)(c) -} - -// Config is a wrapper over the config section -// which provides access to Shard's limits configurations. -type Config config.Config - -func (x *Config) ToConfig() qos.LimiterConfig { - result := qos.LimiterConfig{ - Read: x.read(), - Write: x.write(), - } - panicOnErr(result.Validate()) - return result -} - -func (x *Config) read() qos.OpConfig { - return x.parse("read") -} - -func (x *Config) write() qos.OpConfig { - return x.parse("write") -} - -func (x *Config) parse(sub string) qos.OpConfig { - c := (*config.Config)(x).Sub(sub) - var result qos.OpConfig - - if s := config.Int(c, "max_waiting_ops"); s > 0 { - result.MaxWaitingOps = s - } else { - result.MaxWaitingOps = qos.NoLimit - } - - if s := config.Int(c, "max_running_ops"); s > 0 { - result.MaxRunningOps = s - } else { - result.MaxRunningOps = qos.NoLimit - } - - if s := config.DurationSafe(c, "idle_timeout"); s > 0 { - result.IdleTimeout = s - } else { - result.IdleTimeout = qos.DefaultIdleTimeout - } - - result.Tags = tags(c) - - return result -} - -func tags(c *config.Config) []qos.IOTagConfig { - c = c.Sub("tags") - var result []qos.IOTagConfig - for i := 0; ; i++ { - tag := config.String(c, strconv.Itoa(i)+".tag") - if tag == "" { - return result - } - - var tagConfig qos.IOTagConfig - tagConfig.Tag = tag - - v := c.Value(strconv.Itoa(i) + ".weight") - if v != nil { - w, err := cast.ToFloat64E(v) - panicOnErr(err) - tagConfig.Weight = &w - } - - v = c.Value(strconv.Itoa(i) + ".limit_ops") - if v != nil { - l, err := cast.ToFloat64E(v) - panicOnErr(err) - tagConfig.LimitOps = &l - } - - v = c.Value(strconv.Itoa(i) + ".reserved_ops") - if v != nil { - r, err := cast.ToFloat64E(v) - panicOnErr(err) - tagConfig.ReservedOps = &r - } - - v = c.Value(strconv.Itoa(i) + ".prohibited") - if v != nil { - r, err := cast.ToBoolE(v) - panicOnErr(err) - tagConfig.Prohibited = r - } - - result = append(result, tagConfig) - } -} - -func panicOnErr(err error) { - if err != nil { - panic(err) - } -} diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go index 5d4e8f408..28671ca55 100644 --- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go +++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go @@ -52,7 +52,10 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - return max(d, 0) + if d <= 0 { + d = 0 + } + return d } // MaxBatchSize returns the value of "max_batch_size" config parameter. 
@@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - return max(s, 0) + if s <= 0 { + s = 0 + } + return s } diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go index 20f373184..ba9eeea2b 100644 --- a/cmd/frostfs-node/config/logger/config.go +++ b/cmd/frostfs-node/config/logger/config.go @@ -2,7 +2,6 @@ package loggerconfig import ( "os" - "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -61,21 +60,6 @@ func Timestamp(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "timestamp") } -// Tags returns the value of "tags" config parameter from "logger" section. -func Tags(c *config.Config) [][]string { - var res [][]string - sub := c.Sub(subsection).Sub("tags") - for i := 0; ; i++ { - s := sub.Sub(strconv.FormatInt(int64(i), 10)) - names := config.StringSafe(s, "names") - if names == "" { - break - } - res = append(res, []string{names, config.StringSafe(s, "level")}) - } - return res -} - // ToLokiConfig extracts loki config. func ToLokiConfig(c *config.Config) loki.Config { hostname, _ := os.Hostname() diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go index 796ad529e..ffe8ac693 100644 --- a/cmd/frostfs-node/config/logger/config_test.go +++ b/cmd/frostfs-node/config/logger/config_test.go @@ -22,9 +22,6 @@ func TestLoggerSection_Level(t *testing.T) { require.Equal(t, "debug", loggerconfig.Level(c)) require.Equal(t, "journald", loggerconfig.Destination(c)) require.Equal(t, true, loggerconfig.Timestamp(c)) - tags := loggerconfig.Tags(c) - require.Equal(t, "main, morph", tags[0][0]) - require.Equal(t, "debug", tags[0][1]) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go index a9f774d18..d089870ea 100644 --- a/cmd/frostfs-node/config/morph/config.go +++ b/cmd/frostfs-node/config/morph/config.go @@ -33,9 +33,6 @@ const ( // ContainerCacheSizeDefault represents the default size for the container cache. ContainerCacheSizeDefault = 100 - - // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates. - PollCandidatesTimeoutDefault = 20 * time.Second ) var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section") @@ -157,17 +154,3 @@ func FrostfsIDCacheSize(c *config.Config) uint32 { } return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size") } - -// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter -// from "morph" section. -// -// Returns PollCandidatesTimeoutDefault if the value is not positive duration. -func NetmapCandidatesPollInterval(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection). 
- Sub("netmap").Sub("candidates"), "poll_interval") - if v > 0 { - return v - } - - return PollCandidatesTimeoutDefault -} diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index c50718c5f..4d063245b 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -3,9 +3,7 @@ package nodeconfig import ( "fmt" "io/fs" - "iter" "os" - "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -90,8 +88,12 @@ func Wallet(c *config.Config) *keys.PrivateKey { type stringAddressGroup []string -func (x stringAddressGroup) Addresses() iter.Seq[string] { - return slices.Values(x) +func (x stringAddressGroup) IterateAddresses(f func(string) bool) { + for i := range x { + if f(x[i]) { + break + } + } } func (x stringAddressGroup) NumberOfAddresses() int { @@ -131,6 +133,14 @@ func Attributes(c *config.Config) (attrs []string) { return } +// Relay returns the value of "relay" config parameter +// from "node" section. +// +// Returns false if the value is not set. +func Relay(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "relay") +} + // PersistentSessions returns structure that provides access to "persistent_sessions" // subsection of "node" section. func PersistentSessions(c *config.Config) PersistentSessionsConfig { @@ -188,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string { // // Returns PermDefault if the value is not a positive number. func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { - p := config.UintSafe(l.cfg, "perm") + p := config.UintSafe((*config.Config)(l.cfg), "perm") if p == 0 { p = PermDefault } @@ -200,15 +210,10 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { // // Returns false if the value is not a boolean. func (l PersistentPolicyRulesConfig) NoSync() bool { - return config.BoolSafe(l.cfg, "no_sync") + return config.BoolSafe((*config.Config)(l.cfg), "no_sync") } // CompatibilityMode returns true if need to run node in compatibility with previous versions mode. func CompatibilityMode(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode") } - -// LocodeDBPath returns path to LOCODE database. 
-func LocodeDBPath(c *config.Config) string { - return config.String(c.Sub(subsection), "locode_db_path") -} diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go index 9af1dc038..7b9adecf4 100644 --- a/cmd/frostfs-node/config/node/config_test.go +++ b/cmd/frostfs-node/config/node/config_test.go @@ -29,10 +29,12 @@ func TestNodeSection(t *testing.T) { ) attribute := Attributes(empty) + relay := Relay(empty) persisessionsPath := PersistentSessions(empty).Path() persistatePath := PersistentState(empty).Path() require.Empty(t, attribute) + require.Equal(t, false, relay) require.Equal(t, "", persisessionsPath) require.Equal(t, PersistentStatePathDefault, persistatePath) }) @@ -43,6 +45,7 @@ key := Key(c) addrs := BootstrapAddresses(c) attributes := Attributes(c) + relay := Relay(c) wKey := Wallet(c) persisessionsPath := PersistentSessions(c).Path() persistatePath := PersistentState(c).Path() @@ -84,6 +87,8 @@ return false }) + require.Equal(t, true, relay) + require.Len(t, attributes, 2) require.Equal(t, "Price:11", attributes[0]) require.Equal(t, "UN-LOCODE:RU MSK", attributes[1]) diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go index c8c967d30..6ff1fe2ab 100644 --- a/cmd/frostfs-node/config/object/config.go +++ b/cmd/frostfs-node/config/object/config.go @@ -21,6 +21,10 @@ const ( putSubsection = "put" getSubsection = "get" + + // PutPoolSizeDefault is a default value of routine pool size to + // process object.Put requests in object service. + PutPoolSizeDefault = 10 ) // Put returns structure that provides access to "put" subsection of @@ -31,6 +35,30 @@ func Put(c *config.Config) PutConfig { } } +// PoolSizeRemote returns the value of "remote_pool_size" config parameter. +// +// Returns PutPoolSizeDefault if the value is not a positive number. +func (g PutConfig) PoolSizeRemote() int { + v := config.Int(g.cfg, "remote_pool_size") + if v > 0 { + return int(v) + } + + return PutPoolSizeDefault +} + +// PoolSizeLocal returns the value of "local_pool_size" config parameter. +// +// Returns PutPoolSizeDefault if the value is not a positive number. +func (g PutConfig) PoolSizeLocal() int { + v := config.Int(g.cfg, "local_pool_size") + if v > 0 { + return int(v) + } + + return PutPoolSizeDefault +} + // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool { return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification") diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go index 1c525ef55..e2bb105d9 100644 --- a/cmd/frostfs-node/config/object/config_test.go +++ b/cmd/frostfs-node/config/object/config_test.go @@ -13,6 +13,8 @@ func TestObjectSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() + require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote()) + require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal()) require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty)) require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification()) }) @@ -20,6 +22,8 @@ func TestObjectSection(t *testing.T) { const path = "../../../../config/example/node" fileConfigTest := func(c *config.Config) { + require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote()) + require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal()) require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c)) require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification()) } diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go deleted file mode 100644 index 85f8180ed..000000000 --- a/cmd/frostfs-node/config/qos/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package qos - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -const ( - subsection = "qos" - criticalSubSection = "critical" - internalSubSection = "internal" -) - -// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config -// parameter from "qos" section. -// -// Returns an empty list if not set. -func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys { - return authorizedKeys(c, criticalSubSection) -} - -// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config -// parameter from "qos" section. -// -// Returns an empty list if not set. 
-func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys { - return authorizedKeys(c, internalSubSection) -} - -func authorizedKeys(c *config.Config, sub string) keys.PublicKeys { - strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys") - pubs := make(keys.PublicKeys, 0, len(strKeys)) - - for i := range strKeys { - pub, err := keys.NewPublicKeyFromString(strKeys[i]) - if err != nil { - panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err)) - } - - pubs = append(pubs, pub) - } - - return pubs -} diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go deleted file mode 100644 index b3b6019cc..000000000 --- a/cmd/frostfs-node/config/qos/config_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package qos - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestQoSSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - empty := configtest.EmptyConfig() - - require.Empty(t, CriticalAuthorizedKeys(empty)) - require.Empty(t, InternalAuthorizedKeys(empty)) - }) - - const path = "../../../../config/example/node" - - criticalPubs := make(keys.PublicKeys, 2) - criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11") - criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6") - - internalPubs := make(keys.PublicKeys, 2) - internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2") - internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a") - - fileConfigTest := func(c *config.Config) { - require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c)) - require.Equal(t, internalPubs, InternalAuthorizedKeys(c)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) -} diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go index e954bf19d..0fbac935c 100644 --- a/cmd/frostfs-node/config/replicator/config.go +++ b/cmd/frostfs-node/config/replicator/config.go @@ -11,8 +11,6 @@ const ( // PutTimeoutDefault is a default timeout of object put request in replicator. PutTimeoutDefault = 5 * time.Second - // PoolSizeDefault is a default pool size for put request in replicator. - PoolSizeDefault = 10 ) // PutTimeout returns the value of "put_timeout" config parameter @@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration { // PoolSize returns the value of "pool_size" config parameter // from "replicator" section. -// -// Returns PoolSizeDefault if the value is non-positive integer. 
func PoolSize(c *config.Config) int { - v := int(config.IntSafe(c.Sub(subsection), "pool_size")) - if v > 0 { - return v - } - - return PoolSizeDefault + return int(config.IntSafe(c.Sub(subsection), "pool_size")) } diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go index 2aa490946..2129c01b4 100644 --- a/cmd/frostfs-node/config/replicator/config_test.go +++ b/cmd/frostfs-node/config/replicator/config_test.go @@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) { empty := configtest.EmptyConfig() require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty)) - require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty)) + require.Equal(t, 0, replicatorconfig.PoolSize(empty)) }) const path = "../../../../config/example/node" diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go deleted file mode 100644 index e0efdfde2..000000000 --- a/cmd/frostfs-node/config/rpc/config.go +++ /dev/null @@ -1,42 +0,0 @@ -package rpcconfig - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" -) - -const ( - subsection = "rpc" - limitsSubsection = "limits" -) - -type LimitConfig struct { - Methods []string - MaxOps int64 -} - -// Limits returns the "limits" config from "rpc" section. -func Limits(c *config.Config) []LimitConfig { - c = c.Sub(subsection).Sub(limitsSubsection) - - var limits []LimitConfig - - for i := uint64(0); ; i++ { - si := strconv.FormatUint(i, 10) - sc := c.Sub(si) - - methods := config.StringSliceSafe(sc, "methods") - if len(methods) == 0 { - break - } - - if sc.Value("max_ops") == nil { - panic("no max operations for method group") - } - - limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) - } - - return limits -} diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go deleted file mode 100644 index a6365e19f..000000000 --- a/cmd/frostfs-node/config/rpc/config_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package rpcconfig - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "github.com/stretchr/testify/require" -) - -func TestRPCSection(t *testing.T) { - t.Run("defaults", func(t *testing.T) { - require.Empty(t, Limits(configtest.EmptyConfig())) - }) - - t.Run("correct config", func(t *testing.T) { - const path = "../../../../config/example/node" - - fileConfigTest := func(c *config.Config) { - limits := Limits(c) - require.Len(t, limits, 2) - - limit0 := limits[0] - limit1 := limits[1] - - require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) - require.Equal(t, limit0.MaxOps, int64(1000)) - - require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) - require.Equal(t, limit1.MaxOps, int64(10000)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) - - t.Run("no max operations", func(t *testing.T) { - const path = "testdata/no_max_ops" - - fileConfigTest := func(c *config.Config) { - require.Panics(t, func() { _ = Limits(c) }) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) 
- - t.Run("zero max operations", func(t *testing.T) { - const path = "testdata/zero_max_ops" - - fileConfigTest := func(c *config.Config) { - limits := Limits(c) - require.Len(t, limits, 2) - - limit0 := limits[0] - limit1 := limits[1] - - require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) - require.Equal(t, limit0.MaxOps, int64(0)) - - require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) - require.Equal(t, limit1.MaxOps, int64(10000)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) -} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env deleted file mode 100644 index 2fed4c5bc..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env +++ /dev/null @@ -1,3 +0,0 @@ -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json deleted file mode 100644 index 6156aa71d..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ] - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - } -} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml deleted file mode 100644 index e50b7ae93..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml +++ /dev/null @@ -1,8 +0,0 @@ -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env deleted file mode 100644 index ce7302b0b..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env +++ /dev/null @@ -1,4 +0,0 @@ -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_0_MAX_OPS=0 -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json deleted file mode 100644 index 16a1c173f..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ], - "max_ops": 0 - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - } -} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml deleted file mode 100644 index 525d768d4..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml +++ /dev/null @@ -1,9 +0,0 @@ -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - 
/neo.fs.v2.object.ObjectService/Put - max_ops: 0 - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index bdb280d87..be0acf738 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) { wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) - c.cnrClient = wrap + c.shared.cnrClient = wrap cnrSrc := cntClient.AsContainerSource(wrap) @@ -43,11 +43,11 @@ fatalOnErr(err) cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg) - if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { + if cacheSize > 0 { frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } - c.frostfsidClient = frostfsIDSubjectProvider + c.shared.frostfsidClient = frostfsIDSubjectProvider c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( @@ -57,7 +57,7 @@ service := containerService.NewSignService( &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, + newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, containerService.NewSplitterService( c.cfgContainer.containerBatchSize, c.respSvc, containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), @@ -100,7 +100,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c // TODO: use owner directly from the event after neofs-contract#256 is resolved, // but don't forget about the benefit of reading the new container and caching it: // creation success is most commonly tracked by polling GET op.
- cnr, err := cnrSrc.Get(ctx, ev.ID) + cnr, err := cnrSrc.Get(ev.ID) if err == nil { containerCache.containerCache.set(ev.ID, cnr, nil) } else { @@ -221,25 +221,25 @@ type morphContainerReader struct { src containerCore.Source lister interface { - ContainersOf(context.Context, *user.ID) ([]cid.ID, error) - IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error + ContainersOf(*user.ID) ([]cid.ID, error) + IterateContainersOf(*user.ID, func(cid.ID) error) error } } -func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { - return x.src.Get(ctx, id) +func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) { + return x.src.Get(id) } -func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { - return x.src.DeletionInfo(ctx, id) +func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) { + return x.src.DeletionInfo(id) } -func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { - return x.lister.ContainersOf(ctx, id) +func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { + return x.lister.ContainersOf(id) } -func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error { - return x.lister.IterateContainersOf(ctx, id, processCID) +func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error { + return x.lister.IterateContainersOf(id, processCID) } type morphContainerWriter struct { diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index 1825013c7..ecd82bba5 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -7,12 +7,9 @@ import ( controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -53,14 +50,7 @@ func initControlService(ctx context.Context, c *cfg) { return } - c.cfgControlService.server = grpc.NewServer( - grpc.ChainUnaryInterceptor( - qos.NewSetCriticalIOTagUnaryServerInterceptor(), - metrics.NewUnaryServerInterceptor(), - tracing.NewUnaryServerInterceptor(), - ), - // control service has no stream methods, so no stream interceptors added - ) + c.cfgControlService.server = grpc.NewServer() c.onShutdown(func() { stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go index d2d4e9785..3cca09105 100644 --- a/cmd/frostfs-node/frostfsid.go +++ b/cmd/frostfs-node/frostfsid.go @@ -1,7 +1,6 @@ package main import ( - "context" "strings" "time" @@ -43,7 +42,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int } } -func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { +func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) { hit := false startedAt := time.Now() 
defer func() { @@ -56,7 +55,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) return result.subject, result.err } - subj, err := m.subjProvider.GetSubject(ctx, addr) + subj, err := m.subjProvider.GetSubject(addr) if err != nil { if m.isCacheableError(err) { m.subjCache.Add(addr, subjectWithError{ @@ -70,7 +69,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) return subj, nil } -func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { +func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { hit := false startedAt := time.Now() defer func() { @@ -83,7 +82,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util. return result.subject, result.err } - subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) + subjExt, err := m.subjProvider.GetSubjectExtended(addr) if err != nil { if m.isCacheableError(err) { m.subjExtCache.Add(addr, subjectExtWithError{ diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 6b6d44750..6105be861 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -4,19 +4,14 @@ import ( "context" "crypto/tls" "errors" - "fmt" "net" "time" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" - rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -135,16 +130,12 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( - qos.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), - qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), grpc.ChainStreamInterceptor( - qos.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(), - qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), } @@ -233,54 +224,3 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) } - -func initRPCLimiter(c *cfg) error { - var limits []limiting.KeyLimit - for _, l := range rpcconfig.Limits(c.appCfg) { - limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) - } - - if err := validateRPCLimits(c, limits); err != nil { - return fmt.Errorf("validate RPC limits: %w", err) - } - - limiter, err := limiting.NewSemaphoreLimiter(limits) - if err != nil { - return fmt.Errorf("create RPC limiter: %w", err) - } - - c.cfgGRPC.limiter.Store(limiter) - return nil -} - -func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error { - availableMethods := getAvailableMethods(c.cfgGRPC.servers) - for _, limit := range limits 
{ - for _, method := range limit.Keys { - if _, ok := availableMethods[method]; !ok { - return fmt.Errorf("set limit on an unknown method %q", method) - } - } - } - return nil -} - -func getAvailableMethods(servers []grpcServer) map[string]struct{} { - res := make(map[string]struct{}) - for _, server := range servers { - for _, method := range getMethodsForServer(server.Server) { - res[method] = struct{}{} - } - } - return res -} - -func getMethodsForServer(server *grpc.Server) []string { - var res []string - for service, info := range server.GetServiceInfo() { - for _, method := range info.Methods { - res = append(res, fmt.Sprintf("/%s/%s", service, method.Name)) - } - } - return res -} diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index 0228d2a10..3c15dc439 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) - initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) }) initAccessPolicyEngine(ctx, c) initAndLog(ctx, c, "access policy engine", func(c *cfg) { @@ -117,8 +116,6 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "apemanager", initAPEManagerService) initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) - initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) }) - initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index d9ca01e70..19b4af51f 100644 --- a/cmd/frostfs-node/metrics.go +++ b/cmd/frostfs-node/metrics.go @@ -8,38 +8,38 @@ import ( func metricsComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.metrics == nil { - c.metrics = new(httpComponent) - c.metrics.cfg = c - c.metrics.name = "metrics" - c.metrics.handler = metrics.Handler() + if c.dynamicConfiguration.metrics == nil { + c.dynamicConfiguration.metrics = new(httpComponent) + c.dynamicConfiguration.metrics.cfg = c + c.dynamicConfiguration.metrics.name = "metrics" + c.dynamicConfiguration.metrics.handler = metrics.Handler() updated = true } // (re)init read configuration enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.metrics.enabled { - c.metrics.enabled = enabled + if enabled != c.dynamicConfiguration.metrics.enabled { + c.dynamicConfiguration.metrics.enabled = enabled updated = true } address := metricsconfig.Address(c.appCfg) - if address != c.metrics.address { - c.metrics.address = address + if address != c.dynamicConfiguration.metrics.address { + c.dynamicConfiguration.metrics.address = address updated = true } dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.metrics.shutdownDur { - c.metrics.shutdownDur = dur + if dur != c.dynamicConfiguration.metrics.shutdownDur { + c.dynamicConfiguration.metrics.shutdownDur = dur updated = true } - return c.metrics, updated + return c.dynamicConfiguration.metrics, updated } func enableMetricsSvc(c *cfg) { - c.metricsSvc.Enable() + c.shared.metricsSvc.Enable() } func disableMetricsSvc(c *cfg) { - c.metricsSvc.Disable() + c.shared.metricsSvc.Disable() } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 917cf6fc0..5415da12a 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -14,7 +14,6 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -61,11 +60,10 @@ func (c *cfg) initMorphComponents(ctx context.Context) { } if c.cfgMorph.cacheTTL < 0 { - netmapSource = newRawNetmapStorage(wrap) + netmapSource = wrap } else { // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, - morphconfig.NetmapCandidatesPollInterval(c.appCfg)) + netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) } c.netMapSource = netmapSource @@ -85,7 +83,7 @@ func initMorphClient(ctx context.Context, c *cfg) { cli, err := client.New(ctx, c.key, client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log.WithTag(logger.TagMorph)), + client.WithLogger(c.log), client.WithMetrics(c.metricsCollector.MorphClientMetrics()), client.WithEndpoints(addresses...), client.WithConnLostCallback(func() { @@ -153,7 +151,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error } func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { - if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil { + if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil { return err } @@ -166,7 +164,6 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { err error subs subscriber.Subscriber ) - log := c.log.WithTag(logger.TagMorph) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { @@ -175,14 +172,14 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { } subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: log, + Log: c.log, StartFromBlock: fromSideChainBlock, Client: c.cfgMorph.client, }) fatalOnErr(err) lis, err := event.NewListener(event.ListenerParams{ - Logger: log, + Logger: c.log, Subscriber: subs, }) fatalOnErr(err) @@ -200,7 +197,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, + c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -211,11 +208,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { - log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, + c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 7dfb4fe12..2eb4cd132 100644 --- a/cmd/frostfs-node/netmap.go 
+++ b/cmd/frostfs-node/netmap.go @@ -8,7 +8,6 @@ import ( "net" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -87,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { } } - s.setControlNetmapStatus(ctrlNetSt) + s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt)) } // sets the current node state to the given value. Subsequent cfg.bootstrap @@ -105,7 +104,9 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { v := s.nodeInfo.Load() if v != nil { res, ok = v.(netmapSDK.NodeInfo) - assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) + if !ok { + panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v)) + } } return @@ -123,11 +124,7 @@ func nodeKeyFromNetmap(c *cfg) []byte { func (c *cfg) iterateNetworkAddresses(f func(string) bool) { ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - for s := range ni.NetworkEndpoints() { - if f(s) { - return - } - } + ni.IterateNetworkEndpoints(f) } } @@ -187,7 +184,7 @@ func addNewEpochNotificationHandlers(c *cfg) { c.updateContractNodeInfo(ctx, e) - if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 + if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } @@ -209,12 +206,14 @@ func addNewEpochNotificationHandlers(c *cfg) { // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. func bootstrapNode(ctx context.Context, c *cfg) { - if c.IsMaintenance() { - c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return + if c.needBootstrap() { + if c.IsMaintenance() { + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + return + } + err := c.bootstrap(ctx) + fatalOnErrDetails("bootstrap error", err) } - err := c.bootstrap(ctx) - fatalOnErrDetails("bootstrap error", err) } func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { @@ -240,7 +239,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser // initNetmapState inits current Network map state. // Must be called after Morph components initialization. 
func initNetmapState(ctx context.Context, c *cfg) { - epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) + epoch, err := c.cfgNetmap.wrapper.Epoch() fatalOnErrDetails("could not initialize current epoch number", err) var ni *netmapSDK.NodeInfo @@ -279,7 +278,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string { } func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { - nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) + nmNodes, err := c.cfgNetmap.wrapper.GetCandidates() if err != nil { return nil, err } @@ -292,7 +291,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm } } - node, err := c.netmapLocalNodeState(ctx, epoch) + node, err := c.netmapLocalNodeState(epoch) if err != nil { return nil, err } @@ -313,9 +312,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm return candidate, nil } -func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { +func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { // calculate current network state - nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) + nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch) if err != nil { return nil, err } @@ -350,6 +349,8 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { ) } +var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") + func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: @@ -361,6 +362,10 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro c.stopMaintenance(ctx) + if !c.needBootstrap() { + return errRelayBootstrap + } + if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) return bootstrapOnline(ctx, c) @@ -371,8 +376,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) } -func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { - epoch, err := c.netMapSource.Epoch(ctx) +func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { + epoch, err := c.netMapSource.Epoch() if err != nil { return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) } @@ -385,7 +390,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error { } func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { - netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) + netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration() if err != nil { err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) } else if !netSettings.MaintenanceModeAllowed { @@ -418,7 +423,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient. 
if err != nil { return err } - return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash) + return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res) } type netInfo struct { @@ -433,7 +438,7 @@ type netInfo struct { msPerBlockRdr func() (int64, error) } -func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { +func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { magic, err := n.magic.MagicNumber() if err != nil { return nil, err @@ -443,7 +448,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net ni.SetCurrentEpoch(n.netState.CurrentEpoch()) ni.SetMagicNumber(magic) - netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) + netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration() if err != nil { return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) } diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go deleted file mode 100644 index e6be9cdf5..000000000 --- a/cmd/frostfs-node/netmap_source.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -type rawNetmapSource struct { - client *netmapClient.Client -} - -func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { - return &rawNetmapSource{ - client: client, - } -} - -func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { - nm, err := s.client.GetNetMap(ctx, diff) - if err != nil { - return nil, err - } - candidates, err := s.client.GetCandidates(ctx) - if err != nil { - return nil, err - } - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - return nm, nil -} - -func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - nm, err := s.client.GetNetMapByEpoch(ctx, epoch) - if err != nil { - return nil, err - } - candidates, err := s.client.GetCandidates(ctx) - if err != nil { - return nil, err - } - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - return nm, nil -} - -func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { - return s.client.Epoch(ctx) -} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index c33c02b3f..f82a8e533 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -16,6 +16,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" @@ -31,7 +32,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -54,10 +54,10 @@ type objectSvc struct { patch *patchsvc.Service } -func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { - sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) +func (c *cfg) MaxObjectSize() uint64 { + sz, err := c.cfgNetmap.wrapper.MaxObjectSize() if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, zap.Error(err), ) } @@ -122,8 +122,8 @@ type innerRingFetcherWithNotary struct { sidechain *morphClient.Client } -func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { - keys, err := fn.sidechain.NeoFSAlphabetList(ctx) +func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { + keys, err := fn.sidechain.NeoFSAlphabetList() if err != nil { return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) } @@ -168,14 +168,16 @@ func initObjectService(c *cfg) { sPatch := createPatchSvc(sGet, sPut) // build service pipeline - // grpc | audit | qos | | signature | response | acl | ape | split + // grpc | audit | | signature | response | acl | ape | split splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - apeSvc := createAPEService(c, &irFetcher, splitSvc) + apeSvc := createAPEService(c, splitSvc) + + aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) var commonSvc objectService.Common - commonSvc.Init(&c.internals, apeSvc) + commonSvc.Init(&c.internals, aclSvc) respSvc := objectService.NewResponseService( &commonSvc, @@ -187,10 +189,9 @@ func initObjectService(c *cfg) { respSvc, ) - c.metricsSvc = objectService.NewMetricCollector( + c.shared.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) - auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) + auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { @@ -214,12 +215,14 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl prm.MarkAsGarbage(addr) prm.WithForceRemoval() - return ls.Inhume(ctx, prm) + _, err := ls.Inhume(ctx, prm) + return err } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) + pol := policer.New( - policer.WithLogger(c.log.WithTag(logger.TagPolicer)), + policer.WithLogger(c.log), policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), policer.WithBuryFunc(buryFn), policer.WithContainerSource(c.cfgObject.cnrSource), @@ -263,7 +266,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl var inhumePrm engine.InhumePrm inhumePrm.MarkAsGarbage(addr) - if err := ls.Inhume(ctx, inhumePrm); err != nil { + _, err := ls.Inhume(ctx, inhumePrm) + if err != nil { c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, zap.Error(err), ) @@ -281,7 +285,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl }) } -func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { +func createInnerRingFetcher(c *cfg) 
v2.InnerRingFetcher { return &innerRingFetcherWithNotary{ sidechain: c.cfgMorph.client, } @@ -291,7 +295,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( - replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), + replicator.WithLogger(c.log), replicator.WithPutTimeout( replicatorconfig.PutTimeout(c.appCfg), ), @@ -323,6 +327,7 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche c, c.cfgNetmap.state, irFetcher, + objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal), objectwriter.WithLogger(c.log), objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), ) @@ -348,7 +353,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav c.netMapSource, keyStorage, containerSource, - searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), + searchsvc.WithLogger(c.log), ) } @@ -374,7 +379,7 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ), coreConstructor, containerSource, - getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) + getsvc.WithLogger(c.log)) } func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -385,7 +390,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag c.netMapSource, c, c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), + getsvcV2.WithLogger(c.log), ) } @@ -402,7 +407,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi cfg: c, }, keyStorage, - deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), + deletesvc.WithLogger(c.log), ) } @@ -426,19 +431,28 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi ) } -func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { +func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { + return v2.New( + apeSvc, + c.netMapSource, + irFetcher, + c.cfgObject.cnrSource, + v2.WithLogger(c.log), + ) +} + +func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.frostfsidClient, + c.shared.frostfsidClient, c.netMapSource, c.cfgNetmap.state, c.cfgObject.cnrSource, c.binPublicKey, ), - objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), splitSvc, ) } @@ -462,7 +476,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad prm.WithTarget(tombstone, addrs...) 
- return e.engine.Inhume(ctx, prm) + _, err := e.engine.Inhume(ctx, prm) + return err } func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index e4da8119f..5b40c8a88 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) { func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.pprof == nil { - c.pprof = new(httpComponent) - c.pprof.cfg = c - c.pprof.name = "pprof" - c.pprof.handler = httputil.Handler() - c.pprof.preReload = tuneProfilers + if c.dynamicConfiguration.pprof == nil { + c.dynamicConfiguration.pprof = new(httpComponent) + c.dynamicConfiguration.pprof.cfg = c + c.dynamicConfiguration.pprof.name = "pprof" + c.dynamicConfiguration.pprof.handler = httputil.Handler() + c.dynamicConfiguration.pprof.preReload = tuneProfilers updated = true } // (re)init read configuration enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.pprof.enabled { - c.pprof.enabled = enabled + if enabled != c.dynamicConfiguration.pprof.enabled { + c.dynamicConfiguration.pprof.enabled = enabled updated = true } address := profilerconfig.Address(c.appCfg) - if address != c.pprof.address { - c.pprof.address = address + if address != c.dynamicConfiguration.pprof.address { + c.dynamicConfiguration.pprof.address = address updated = true } dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.pprof.shutdownDur { - c.pprof.shutdownDur = dur + if dur != c.dynamicConfiguration.pprof.shutdownDur { + c.dynamicConfiguration.pprof.shutdownDur = dur updated = true } - return c.pprof, updated + return c.dynamicConfiguration.pprof, updated } func tuneProfilers(c *cfg) { diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go deleted file mode 100644 index 6394b668b..000000000 --- a/cmd/frostfs-node/qos.go +++ /dev/null @@ -1,108 +0,0 @@ -package main - -import ( - "bytes" - "context" - - qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "go.uber.org/zap" -) - -type cfgQoSService struct { - netmapSource netmap.Source - logger *logger.Logger - allowedCriticalPubs [][]byte - allowedInternalPubs [][]byte -} - -func initQoSService(c *cfg) { - criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) - internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg) - rawCriticalPubs := make([][]byte, 0, len(criticalPubs)) - rawInternalPubs := make([][]byte, 0, len(internalPubs)) - for i := range criticalPubs { - rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes()) - } - for i := range internalPubs { - rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) - } - - c.cfgQoSService = cfgQoSService{ - netmapSource: c.netMapSource, - logger: c.log, - allowedCriticalPubs: rawCriticalPubs, - allowedInternalPubs: rawInternalPubs, - } -} - -func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { - rawTag, defined := qosTagging.IOTagFromContext(ctx) - if !defined { - if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { - return 
qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) - } - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - ioTag, err := qos.FromRawString(rawTag) - if err != nil { - s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - - switch ioTag { - case qos.IOTagClient: - return ctx - case qos.IOTagCritical: - for _, pk := range s.allowedCriticalPubs { - if bytes.Equal(pk, requestSignPublicKey) { - return ctx - } - } - nm, err := s.netmapSource.GetNetMap(ctx, 0) - if err != nil { - s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - for _, node := range nm.Nodes() { - if bytes.Equal(node.PublicKey(), requestSignPublicKey) { - return ctx - } - } - s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - case qos.IOTagInternal: - if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { - return ctx - } - s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - default: - s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } -} - -func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { - for _, pk := range s.allowedInternalPubs { - if bytes.Equal(pk, publicKey) { - return true - } - } - nm, err := s.netmapSource.GetNetMap(ctx, 0) - if err != nil { - s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) - return false - } - for _, node := range nm.Nodes() { - if bytes.Equal(node.PublicKey(), publicKey) { - return true - } - } - - return false -} diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go deleted file mode 100644 index 971f9eebf..000000000 --- a/cmd/frostfs-node/qos_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package main - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestQoSService_Client(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag client defined", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { - ctx := 
tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) -} - -func TestQoSService_Internal(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - 
require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) -} - -func TestQoSService_Critical(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagCritical.String(), tag) - }) - t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagCritical.String(), tag) - }) -} - -func TestQoSService_NetmapGetError(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - s.netmapSource = &utilTesting.TestNetmapSource{} - t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) -} - -func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { - nmSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - reqSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - allowedCritSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - allowedIntSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - var node netmap.NodeInfo - node.SetPublicKey(nmSigner.PublicKey().Bytes()) - nm := &netmap.NetMap{} - nm.SetEpoch(100) - nm.SetNodes([]netmap.NodeInfo{node}) - - return &cfgQoSService{ - logger: test.NewLogger(t), - netmapSource: &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ - 100: nm, - }, - CurrentEpoch: 100, - }, - allowedCriticalPubs: [][]byte{ - allowedCritSigner.PublicKey().Bytes(), - }, - allowedInternalPubs: [][]byte{ - allowedIntSigner.PublicKey().Bytes(), - }, - }, - &testQoSServicePublicKeys{ - NetmapNode: nmSigner.PublicKey().Bytes(), - Request: reqSigner.PublicKey().Bytes(), - Internal: 
allowedIntSigner.PublicKey().Bytes(), - Critical: allowedCritSigner.PublicKey().Bytes(), - } -} - -type testQoSServicePublicKeys struct { - NetmapNode []byte - Request []byte - Internal []byte - Critical []byte -} diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index fbfe3f5e6..2f3c9cbfe 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -14,7 +14,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -56,7 +55,7 @@ func initSessionService(c *cfg) { server := sessionTransportGRPC.New( sessionSvc.NewSignService( &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), + sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log), ), ) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 62af45389..f3ddc8cbe 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -14,7 +14,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" "google.golang.org/grpc" @@ -30,16 +29,16 @@ type cnrSource struct { cli *containerClient.Client } -func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { - return c.src.Get(ctx, id) +func (c cnrSource) Get(id cid.ID) (*container.Container, error) { + return c.src.Get(id) } -func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { - return c.src.DeletionInfo(ctx, cid) +func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) { + return c.src.DeletionInfo(cid) } -func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { - return c.cli.ContainersOf(ctx, nil) +func (c cnrSource) List() ([]cid.ID, error) { + return c.cli.ContainersOf(nil) } func initTreeService(c *cfg) { @@ -52,12 +51,12 @@ func initTreeService(c *cfg) { c.treeService = tree.New( tree.WithContainerSource(cnrSource{ src: c.cfgObject.cnrSource, - cli: c.cnrClient, + cli: c.shared.cnrClient, }), - tree.WithFrostfsidSubjectProvider(c.frostfsidClient), + tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), + tree.WithLogger(c.log), tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), tree.WithContainerCacheSize(treeConfig.CacheSize()), tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), @@ -73,7 +72,7 @@ func initTreeService(c *cfg) { ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) + tree.RegisterTreeServiceServer(s, c.treeService) }) c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { diff --git 
a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index 22d2e0aa9..ae52b9e4a 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -30,11 +30,6 @@ func validateConfig(c *config.Config) error { return fmt.Errorf("invalid logger destination: %w", err) } - err = loggerPrm.SetTags(loggerconfig.Tags(c)) - if err != nil { - return fmt.Errorf("invalid list of allowed tags: %w", err) - } - // shard configuration validation shardNum := 0 diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index 495365cf0..d9c0f167f 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -1,6 +1,7 @@ package main import ( + "os" "path/filepath" "testing" @@ -21,4 +22,17 @@ func TestValidate(t *testing.T) { require.NoError(t, err) }) }) + + t.Run("mainnet", func(t *testing.T) { + os.Clearenv() // ENVs have priority over config files, so we do this in tests + p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") + c := config.New(p, "", config.EnvPrefix) + require.NoError(t, validateConfig(c)) + }) + t.Run("testnet", func(t *testing.T) { + os.Clearenv() // ENVs have priority over config files, so we do this in tests + p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") + c := config.New(p, "", config.EnvPrefix) + require.NoError(t, validateConfig(c)) + }) } diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index 13f447af4..b8acf0143 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -51,13 +51,8 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { } cmd.PrintErrln(err) - for p := cmd; p != nil; p = p.Parent() { - if p.PersistentPostRun != nil { - p.PersistentPostRun(cmd, nil) - if !cobra.EnableTraverseRunHooks { - break - } - } + if cmd.PersistentPostRun != nil { + cmd.PersistentPostRun(cmd, nil) } os.Exit(code) } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index 5dd1a060e..f550552d2 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -27,15 +27,15 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState) - for endpoint := range node.NetworkEndpoints() { + netmap.IterateNetworkEndpoints(node, func(endpoint string) { cmd.Printf("%s ", endpoint) - } + }) cmd.Println() if !short { - for key, value := range node.Attributes() { + node.IterateAttributes(func(key, value string) { cmd.Printf("%s\t%s: %s\n", indent, key, value) - } + }) } } diff --git a/config/example/ir.env b/config/example/ir.env index c13044a6e..ebd91c243 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -1,7 +1,5 @@ FROSTFS_IR_LOGGER_LEVEL=info FROSTFS_IR_LOGGER_TIMESTAMP=true -FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph" -FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_IR_WALLET_PATH=/path/to/wallet.json FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX diff --git a/config/example/ir.yaml b/config/example/ir.yaml index ed53f014b..49f9fd324 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -3,9 +3,6 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" timestamp: true - tags: - - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. 
- level: debug wallet: path: /path/to/wallet.json # Path to NEP-6 NEO wallet file diff --git a/config/example/node.env b/config/example/node.env index 9a2426358..b2a0633a9 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -1,8 +1,6 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_LOGGER_DESTINATION=journald FROSTFS_LOGGER_TIMESTAMP=true -FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" -FROSTFS_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_PPROF_ENABLED=true FROSTFS_PPROF_ADDRESS=localhost:6060 @@ -22,9 +20,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" FROSTFS_NODE_ATTRIBUTE_0=Price:11 FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" +FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state -FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db # Tree service section FROSTFS_TREE_ENABLED=true @@ -89,16 +87,14 @@ FROSTFS_REPLICATOR_POOL_SIZE=10 FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 # Object service section +FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 +FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_0_MAX_OPS=1000 -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 - # Storage engine section +FROSTFS_STORAGE_SHARD_POOL_SIZE=15 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 ## 0 shard ### Flag to refill Metabase from BlobStor @@ -123,8 +119,7 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true -FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest +FROSTFS_STORAGE_SHARD_0_COMPRESS=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 @@ -159,54 +154,6 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500 #### Limit of concurrent workers collecting expired objects by the garbage collector FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15 -#### Limits config -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000 
-FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 ## 1 shard ### Flag to refill Metabase from BlobStor @@ -278,6 +225,3 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" FROSTFS_MULTINET_BALANCER=roundrobin FROSTFS_MULTINET_RESTRICT=false FROSTFS_MULTINET_FALLBACK_DELAY=350ms - -FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" -FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" diff --git a/config/example/node.json b/config/example/node.json index 6b7a9c2c6..f3192ac2f 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -2,13 +2,7 @@ "logger": { "level": "debug", "destination": "journald", - "timestamp": true, - "tags": [ - { - "names": "main, morph", - "level": "debug" - } - ] + "timestamp": true }, "pprof": { "enabled": true, @@ -37,13 +31,13 @@ ], "attribute_0": "Price:11", "attribute_1": "UN-LOCODE:RU MSK", + "relay": true, "persistent_sessions": { "path": "/sessions" }, "persistent_state": { "path": "/state" - }, - "locode_db_path": "/path/to/locode/db" + } }, "grpc": { "0": { @@ -140,30 +134,16 @@ "tombstone_lifetime": 10 }, "put": { + "remote_pool_size": 100, + "local_pool_size": 200, "skip_session_token_issuer_verification": true }, "get": { "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] } }, - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ], - "max_ops": 1000 - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - }, "storage": { + "shard_pool_size": 15, 
"shard_ro_error_threshold": 100, "shard": { "0": { @@ -188,15 +168,12 @@ "max_batch_size": 100, "max_batch_delay": "10ms" }, - "compression": { - "enabled": true, - "level": "fastest", - "exclude_content_types": [ - "audio/*", "video/*" - ], - "estimate_compressibility": true, - "estimate_compressibility_threshold": 0.7 - }, + "compress": true, + "compression_exclude_content_types": [ + "audio/*", "video/*" + ], + "compression_estimate_compressibility": true, + "compression_estimate_compressibility_threshold": 0.7, "small_object_size": 102400, "blobstor": [ { @@ -229,87 +206,6 @@ "remover_sleep_interval": "2m", "expired_collector_batch_size": 1500, "expired_collector_worker_count": 15 - }, - "limits": { - "read": { - "max_running_ops": 10000, - "max_waiting_ops": 1000, - "idle_timeout": "30s", - "tags": [ - { - "tag": "internal", - "weight": 20, - "limit_ops": 0, - "reserved_ops": 1000 - }, - { - "tag": "client", - "weight": 70, - "reserved_ops": 10000 - }, - { - "tag": "background", - "weight": 5, - "limit_ops": 10000, - "reserved_ops": 0 - }, - { - "tag": "writecache", - "weight": 5, - "limit_ops": 25000 - }, - { - "tag": "policer", - "weight": 5, - "limit_ops": 25000, - "prohibited": true - }, - { - "tag": "treesync", - "weight": 5, - "limit_ops": 25 - } - ] - }, - "write": { - "max_running_ops": 1000, - "max_waiting_ops": 100, - "idle_timeout": "45s", - "tags": [ - { - "tag": "internal", - "weight": 200, - "limit_ops": 0, - "reserved_ops": 100 - }, - { - "tag": "client", - "weight": 700, - "reserved_ops": 1000 - }, - { - "tag": "background", - "weight": 50, - "limit_ops": 1000, - "reserved_ops": 0 - }, - { - "tag": "writecache", - "weight": 50, - "limit_ops": 2500 - }, - { - "tag": "policer", - "weight": 50, - "limit_ops": 2500 - }, - { - "tag": "treesync", - "weight": 50, - "limit_ops": 100 - } - ] - } } }, "1": { @@ -330,9 +226,7 @@ "max_batch_size": 200, "max_batch_delay": "20ms" }, - "compression": { - "enabled": false - }, + "compress": false, "small_object_size": 102400, "blobstor": [ { @@ -411,19 +305,5 @@ "balancer": "roundrobin", "restrict": false, "fallback_delay": "350ms" - }, - "qos": { - "critical": { - "authorized_keys": [ - "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", - "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" - ] - }, - "internal": { - "authorized_keys": [ - "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", - "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" - ] - } } } diff --git a/config/example/node.yaml b/config/example/node.yaml index 2d4bc90fb..a179b4704 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -2,9 +2,6 @@ logger: level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" destination: journald # logger destination: one of "stdout" (default), "journald" timestamp: true - tags: - - names: "main, morph" - level: debug systemdnotify: enabled: true @@ -34,11 +31,11 @@ node: - grpcs://localhost:8083 attribute_0: "Price:11" attribute_1: UN-LOCODE:RU MSK + relay: true # start Storage node in relay mode without bootstrapping into the Network map persistent_sessions: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: path: /state # path to persistent state file of Storage node - "locode_db_path": "/path/to/locode/db" grpc: - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server @@ -82,8 +79,7 @@ contracts: # side chain NEOFS 
contract script hashes; optional, override values morph: dial_timeout: 30s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). - # Negative value disables caching. A zero value sets the default value. + cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. # Default value: block time. It is recommended to have this value less than or equal to block time. # Cached entities: containers, container lists, eACL tables. container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache. @@ -98,9 +94,6 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 ape_chain_cache_size: 100000 - netmap: - candidates: - poll_interval: 20s apiclient: dial_timeout: 15s # timeout for FrostFS API client connection @@ -123,23 +116,17 @@ object: delete: tombstone_lifetime: 10 # tombstone "local" lifetime in epochs put: + remote_pool_size: 100 # number of async workers for remote PUT operations + local_pool_size: 200 # number of async workers for local PUT operations skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true get: priority: # list of metrics of nodes for prioritization - $attribute:ClusterName - $attribute:UN-LOCODE -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - max_ops: 1000 - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 - storage: + # note: shard configuration can be omitted for relay node (see `node.relay`) + shard_pool_size: 15 # size of per-shard worker pools used for PUT operations shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard: @@ -153,7 +140,7 @@ storage: flush_worker_count: 30 # number of write-cache flusher threads metabase: - perm: 0o644 # permissions for metabase files(directories: +x for current user and group) + perm: 0644 # permissions for metabase files(directories: +x for current user and group) max_batch_size: 200 max_batch_delay: 20ms @@ -161,19 +148,18 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compression: - enabled: false # turn on/off zstd compression of stored objects + compress: false # turn on/off zstd(level 3) compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes blobstor: - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) + perm: 0644 # permissions for blobstor files(directories: +x for current user and group) depth: 1 # max depth of object tree storage in key-value DB width: 4 # max width of object tree storage in key-value DB opened_cache_capacity: 50 # maximum number of opened database files opened_cache_ttl: 5m # ttl for opened database file opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) + - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) depth: 5 # max depth of object tree storage in FS gc: @@ -204,14 +190,12 @@ storage: max_batch_size: 100 
max_batch_delay: 10ms - compression: - enabled: true # turn on/off zstd compression of stored objects - level: fastest - exclude_content_types: - - audio/* - - video/* - estimate_compressibility: true - estimate_compressibility_threshold: 0.7 + compress: true # turn on/off zstd(level 3) compression of stored objects + compression_exclude_content_types: + - audio/* + - video/* + compression_estimate_compressibility: true + compression_estimate_compressibility_threshold: 0.7 blobstor: - type: blobovnicza @@ -234,59 +218,6 @@ storage: expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector - limits: - read: - max_running_ops: 10000 - max_waiting_ops: 1000 - idle_timeout: 30s - tags: - - tag: internal - weight: 20 - limit_ops: 0 - reserved_ops: 1000 - - tag: client - weight: 70 - reserved_ops: 10000 - - tag: background - weight: 5 - limit_ops: 10000 - reserved_ops: 0 - - tag: writecache - weight: 5 - limit_ops: 25000 - - tag: policer - weight: 5 - limit_ops: 25000 - prohibited: true - - tag: treesync - weight: 5 - limit_ops: 25 - write: - max_running_ops: 1000 - max_waiting_ops: 100 - idle_timeout: 45s - tags: - - tag: internal - weight: 200 - limit_ops: 0 - reserved_ops: 100 - - tag: client - weight: 700 - reserved_ops: 1000 - - tag: background - weight: 50 - limit_ops: 1000 - reserved_ops: 0 - - tag: writecache - weight: 50 - limit_ops: 2500 - - tag: policer - weight: 50 - limit_ops: 2500 - - tag: treesync - weight: 50 - limit_ops: 100 - 1: writecache: path: tmp/1/cache # write-cache root directory @@ -305,7 +236,7 @@ storage: pilorama: path: tmp/1/blob/pilorama.db no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. - perm: 0o644 # permission to use for the database file and intermediate directories + perm: 0644 # permission to use for the database file and intermediate directories tracing: enabled: true @@ -338,13 +269,3 @@ multinet: balancer: roundrobin restrict: false fallback_delay: 350ms - -qos: - critical: - authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag - - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 - - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 - internal: - authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag - - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 - - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a diff --git a/config/mainnet/README.md b/config/mainnet/README.md new file mode 100644 index 000000000..717a9b0ff --- /dev/null +++ b/config/mainnet/README.md @@ -0,0 +1,28 @@ +# N3 Mainnet Storage node configuration + +Here is a template for a simple storage node configuration in N3 Mainnet. +Make sure to specify correct values instead of `<...>` placeholders. +Do not change the `contracts` section. Run the latest frostfs-node release with +the fixed config: `frostfs-node -c config.yml`. + +To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract. +The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221` +(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
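+ +For example, a 1 GAS deposit via `neo-go` might look like this sketch; the wallet path, sender address and N3 Mainnet RPC endpoint are `<...>` placeholders to replace with your own values. + +``` +neo-go wallet nep17 transfer -w <wallet.json> -r <n3-mainnet-rpc-endpoint> \ +--from <your-n3-address> \ +--to NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk \ +--token GAS \ +--amount 1 +```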
+ +## Tips + +Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server. +```yaml +node: + addresses: + - grpcs://frostfs.my.org:8080 + +grpc: + num: 1 + 0: + endpoint: frostfs.my.org:8080 + tls: + enabled: true + certificate: /path/to/cert + key: /path/to/key +``` diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml new file mode 100644 index 000000000..d86ea451f --- /dev/null +++ b/config/mainnet/config.yml @@ -0,0 +1,70 @@ +node: + wallet: + path: + address: + password: + addresses: + - + attribute_0: UN-LOCODE: + attribute_1: Price:100000 + attribute_2: User-Agent:FrostFS\/0.9999 + +grpc: + num: 1 + 0: + endpoint: + tls: + enabled: false + +storage: + shard_num: 1 + shard: + 0: + metabase: + path: /storage/path/metabase + perm: 0600 + blobstor: + - path: /storage/path/blobovnicza + type: blobovnicza + perm: 0600 + opened_cache_capacity: 32 + depth: 1 + width: 1 + - path: /storage/path/fstree + type: fstree + perm: 0600 + depth: 4 + writecache: + enabled: false + gc: + remover_batch_size: 100 + remover_sleep_interval: 1m + +logger: + level: info + +prometheus: + enabled: true + address: localhost:9090 + shutdown_timeout: 15s + +object: + put: + remote_pool_size: 100 + local_pool_size: 100 + +morph: + rpc_endpoint: + - wss://rpc1.morph.frostfs.info:40341/ws + - wss://rpc2.morph.frostfs.info:40341/ws + - wss://rpc3.morph.frostfs.info:40341/ws + - wss://rpc4.morph.frostfs.info:40341/ws + - wss://rpc5.morph.frostfs.info:40341/ws + - wss://rpc6.morph.frostfs.info:40341/ws + - wss://rpc7.morph.frostfs.info:40341/ws + dial_timeout: 20s + +contracts: + balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 + container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 + netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 diff --git a/config/testnet/README.md b/config/testnet/README.md new file mode 100644 index 000000000..e2cda33ec --- /dev/null +++ b/config/testnet/README.md @@ -0,0 +1,129 @@ +# N3 Testnet Storage node configuration + +There is a prepared configuration for NeoFS Storage Node deployment in +N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared +docker image and run it with docker-compose. + +## Build image + +A prepared **frostfs-storage-testnet** image is available on Docker Hub. +However, if you need to rebuild it for some reason, run +the `make image-storage-testnet` command. + +``` +$ make image-storage-testnet +... +Successfully built ab0557117b02 +Successfully tagged nspccdev/neofs-storage-testnet:0.25.1 +``` + +## Deploy node + +To run a storage node in the N3 Testnet environment, you should deposit GAS assets, +update the docker-compose file and start the node. + +### Deposit + +The Storage Node owner should deposit GAS to the NeoFS smart contract. This generates a +bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send the bootstrap tx. + +First, obtain GAS in the N3 Testnet chain. You can do that with the +[faucet](https://neowish.ngd.network) service. + +Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet. +You can provide a script hash in the `data` argument of the transfer tx to make a +deposit to a specified account. Otherwise, the deposit is made to the tx sender. + +The NeoFS contract script hash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`, +so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`. + +See a deposit example with `neo-go`. + +``` +neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \ +--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \ +--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \ +--token GAS \ +--amount 1 +```
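+ +Once the transfer is confirmed, you can optionally check the resulting NeoFS balance with `frostfs-cli`. This is a sketch: `<storage-node-endpoint>` is a placeholder for any storage node gRPC endpoint, and the wallet is the one the deposit was made from. + +``` +$ frostfs-cli accounting balance -r <storage-node-endpoint> -w wallet.json +```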
+ +### Configure + +Next, configure the `node_config.env` file. Change the endpoint values. Both +should contain your **public** IP. + +``` +NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 +NEOFS_NODE_ADDRESSES=65.52.183.157:36512 +``` + +Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory) +attribute. + +``` +NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 +NEOFS_NODE_ADDRESSES=65.52.183.157:36512 +NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED +``` + +You can validate the UN/LOCODE attribute against the +[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0) +with frostfs-cli. + +``` +$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED' +Country: Russia +Location: Saint Petersburg (ex Leningrad) +Continent: Europe +Subdivision: [SPE] Sankt-Peterburg +Coordinates: 59.53, 30.15 +``` + +It is recommended to pass the node's key as a file. To do so, convert your wallet +WIF to 32-byte hex (via `frostfs-cli`, for example) and save it to a file. + +``` +// Print WIF in a 32-byte hex format +$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s +PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56 +PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059 +WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s +Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ +ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc +ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf + +// Save 32-byte hex into a file +$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key +``` + +Then, specify the path to this file in `docker-compose.yml`: +```yaml + volumes: + - frostfs_storage:/storage + - ./my_wallet.key:/node.key +``` + + +NeoFS objects will be stored on your machine. By default, docker-compose +is configured to store objects in the named docker volume `frostfs_storage`. You can +instead specify a directory on the filesystem where objects will be stored. + +```yaml + volumes: + - /home/username/frostfs/rc3/storage:/storage + - ./my_wallet.key:/node.key +``` + +### Start + +Run the node with the `docker-compose up` command and stop it with `docker-compose down`.
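+ +After startup, the node should bootstrap into the network map within an epoch or so. One way to check it is to query your node with `frostfs-cli` (a sketch; replace the endpoint with the public address from your `node_config.env` and point `-w` at the key file created above): + +``` +$ frostfs-cli netmap nodeinfo -r 65.52.183.157:36512 -w my_wallet.key +```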
+ +### Debug + +To print node logs, use `docker logs frostfs-testnet`. To print debug messages in the +log, set the log level to debug with this env: + +```yaml + environment: + - NEOFS_LOGGER_LEVEL=debug +``` diff --git a/config/testnet/config.yml b/config/testnet/config.yml new file mode 100644 index 000000000..76b36cdf6 --- /dev/null +++ b/config/testnet/config.yml @@ -0,0 +1,52 @@ +logger: + level: info + +morph: + rpc_endpoint: + - wss://rpc01.morph.testnet.frostfs.info:51331/ws + - wss://rpc02.morph.testnet.frostfs.info:51331/ws + - wss://rpc03.morph.testnet.frostfs.info:51331/ws + - wss://rpc04.morph.testnet.frostfs.info:51331/ws + - wss://rpc05.morph.testnet.frostfs.info:51331/ws + - wss://rpc06.morph.testnet.frostfs.info:51331/ws + - wss://rpc07.morph.testnet.frostfs.info:51331/ws + dial_timeout: 20s + +contracts: + balance: e0420c216003747626670d1424569c17c79015bf + container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 + netmap: d4b331639799e2958d4bc5b711b469d79de94e01 + +node: + key: /node.key + attribute_0: Deployed:SelfHosted + attribute_1: User-Agent:FrostFS\/0.9999 + +prometheus: + enabled: true + address: localhost:9090 + shutdown_timeout: 15s + +storage: + shard_num: 1 + shard: + 0: + metabase: + path: /storage/metabase + perm: 0777 + blobstor: + - path: /storage/path/blobovnicza + type: blobovnicza + perm: 0600 + opened_cache_capacity: 32 + depth: 1 + width: 1 + - path: /storage/path/fstree + type: fstree + perm: 0600 + depth: 4 + writecache: + enabled: false + gc: + remover_batch_size: 100 + remover_sleep_interval: 1m diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json index b68ce4fa3..6abf5ecdc 100644 --- a/dev/.vscode-example/launch.json +++ b/dev/.vscode-example/launch.json @@ -42,6 +42,7 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", + "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080", @@ -97,6 +98,7 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", + "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082", @@ -152,6 +154,7 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", + "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084", @@ -207,6 +210,7 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", + "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086", diff --git a/docs/release-instruction.md b/docs/release-instruction.md index aa867e83c..18659c699 100644 --- a/docs/release-instruction.md +++ b/docs/release-instruction.md @@ -95,15 +95,19 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} ## Post-release -### Prepare and push images to a Docker registry (automated) +### Prepare and push images to Docker Hub (if not automated) -Create Docker images for all 
applications and push them into container registry -(executed automatically in Forgejo Actions upon pushing a release tag): +Create Docker images for all applications and push them to Docker Hub +(requires [organization](https://hub.docker.com/u/truecloudlab) privileges): ```shell $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} $ make images -$ make push-images +$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION} +$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION} +$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION} +$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION} +$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION} ``` ### Make a proper release (if not automated) diff --git a/docs/shard-modes.md index 6cc4ab13c..3b459335b 100644 --- a/docs/shard-modes.md +++ b/docs/shard-modes.md @@ -51,7 +51,10 @@ However, all mode changing operations are idempotent. ## Automatic mode changes -A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold. +A shard can automatically switch to `degraded-read-only` mode in three cases: +1. If the metabase was not available or couldn't be opened/initialized during shard startup. +2. If the shard error counter exceeds the threshold. +3. If the metabase couldn't be reopened during SIGHUP handling. # Detach shard diff --git a/docs/storage-node-configuration.md index da9fdfed0..98d72cb69 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -12,23 +12,21 @@ There are some custom types used for brevity: # Structure -| Section | Description | -|--------------|---------------------------------------------------------| -| `node` | [Node parameters](#node-section) | -| `logger` | [Logging parameters](#logger-section) | -| `pprof` | [PProf configuration](#pprof-section) | -| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | -| `control` | [Control service configuration](#control-section) | -| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | -| `morph` | [N3 blockchain client configuration](#morph-section) | -| `apiclient` | [FrostFS API client configuration](#apiclient-section) | -| `policer` | [Policer service configuration](#policer-section) | -| `replicator` | [Replicator service configuration](#replicator-section) | -| `storage` | [Storage engine configuration](#storage-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | -| `multinet` | [Multinet configuration](#multinet-section) | -| `qos` | [QoS configuration](#qos-section) | +| Section | Description | +|------------------------|---------------------------------------------------------------------| +| `logger` | [Logging parameters](#logger-section) | +| `pprof` | [PProf configuration](#pprof-section) | +| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | +| `control` | [Control service configuration](#control-section) | +| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | +| `morph` | [N3 blockchain client configuration](#morph-section) | +| `apiclient` | [FrostFS API client configuration](#apiclient-section) | +| `policer` | [Policer service configuration](#policer-section) | +| `replicator` | [Replicator service configuration](#replicator-section) | +| `storage` | [Storage engine configuration](#storage-section) | +| `runtime` | [Runtime configuration](#runtime-section)
| +| `audit` | [Audit configuration](#audit-section) | +| `multinet` | [Multinet configuration](#multinet-section) | # `control` section ```yaml @@ -112,21 +110,11 @@ Contains logger parameters. ```yaml logger: level: info - tags: - - names: "main, morph" - level: debug ``` -| Parameter | Type | Default value | Description | -|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------| -| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | -| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tags description. | - -## `tags` subsection -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | -| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|---------------------------------------------------------------------------------------------------| +| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract @@ -159,19 +147,15 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 switch_interval: 2m - netmap: - candidates: - poll_interval: 20s ``` -| Parameter | Type | Default value | Description | -|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | -| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | -| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | -| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | -| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | -| `netmap.candidates.poll_interval` | `duration` | `20s` | Timeout to set up frequency of merge candidates to netmap with netmap in local cache. | +| Parameter | Type | Default value | Description | +| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | +| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | +| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | +| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | +| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | ## `rpc_endpoint` subsection | Parameter | Type | Default value | Description | @@ -185,6 +169,7 @@ Local storage engine configuration. | Parameter | Type | Default value | Description | |----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------| +| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. | | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. | | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. | | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. | @@ -195,41 +180,20 @@ Contains configuration for each shard. Keys must be consecutive numbers starting `default` subsection has the same format and specifies defaults for missing values. The following table describes configuration for each shard. -| Parameter | Type | Default value | Description | -| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- | -| `compression` | [Compression config](#compression-subsection) | | Compression config. | -| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | -| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | -| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | -| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | -| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | -| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | -| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | -| `gc` | [GC config](#gc-subsection) | | GC configuration. | -| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | - -### `compression` subsection - -Contains compression config. - -```yaml -compression: - enabled: true - level: smallest_size - exclude_content_types: - - audio/* - - video/* - estimate_compressibility: true - estimate_compressibility_threshold: 0.7 -``` - -| Parameter | Type | Default value | Description | -| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `enabled` | `bool` | `false` | Flag to enable compression. | -| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | -| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | -| `estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | -| `estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | +| Parameter | Type | Default value | Description | +| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `compress` | `bool` | `false` | Flag to enable compression. | +| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | +| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then normalized compressibility estimation is used to decide whether to compress data or not. | +| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data will be compressed if the estimate is greater than this value. | +| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | +| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | +| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | +| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | +| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | +| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | +| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | +| `gc` | [GC config](#gc-subsection) | | GC configuration. | ### `blobstor` subsection @@ -244,7 +208,7 @@ blobstor: width: 4 - type: fstree path: /path/to/blobstor/blobovnicza - perm: 0o644 + perm: 0644 size: 4194304 depth: 1 width: 4 @@ -304,7 +268,7 @@ gc: ```yaml metabase: path: /path/to/meta.db - perm: 0o644 + perm: 0644 max_batch_size: 200 max_batch_delay: 20ms ``` @@ -336,65 +300,6 @@ writecache: | `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. | | `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. | -### `limits` subsection - -```yaml -limits: - max_read_running_ops: 10000 - max_read_waiting_ops: 1000 - max_write_running_ops: 1000 - max_write_waiting_ops: 100 - read: - - tag: internal - weight: 20 - limit_ops: 0 - reserved_ops: 1000 - - tag: client - weight: 70 - reserved_ops: 10000 - - tag: background - weight: 5 - limit_ops: 10000 - reserved_ops: 0 - - tag: writecache - weight: 5 - limit_ops: 25000 - - tag: policer - weight: 5 - limit_ops: 25000 - write: - - tag: internal - weight: 200 - limit_ops: 0 - reserved_ops: 100 - - tag: client - weight: 700 - reserved_ops: 1000 - - tag: background - weight: 50 - limit_ops: 1000 - reserved_ops: 0 - - tag: writecache - weight: 50 - limit_ops: 2500 - - tag: policer - weight: 50 - limit_ops: 2500 -``` - -| Parameter | Type | Default value | Description | -| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- | -| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of runnig read operations. | -| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. | -| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. | -| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of running write operations. | -| `read` | `[]tag` | empty | Array of shard read settings for tags. | -| `write` | `[]tag` | empty | Array of shard write settings for tags. | -| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. | -| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. | -| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | -| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | -| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. 
| # `node` section @@ -410,22 +315,22 @@ node: - "Price:11" - "UN-LOCODE:RU MSK" - "key:value" + relay: false persistent_sessions: path: /sessions persistent_state: path: /state - locode_db_path: "/path/to/locode/db" ``` -| Parameter | Type | Default value | Description | -|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------| -| `key` | `string` | | Path to the binary-encoded private key. | -| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | -| `addresses` | `[]string` | | Addresses advertised in the netmap. | -| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | -| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | -| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | +| Parameter | Type | Default value | Description | +|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------| +| `key` | `string` | | Path to the binary-encoded private key. | +| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | +| `addresses` | `[]string` | | Addresses advertised in the netmap. | +| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | +| `relay` | `bool` | | Enable relay mode. | +| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | +| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | ## `wallet` subsection N3 wallet configuration. @@ -490,16 +395,18 @@ replicator: pool_size: 10 ``` -| Parameter | Type | Default value | Description | -|---------------|------------|---------------|---------------------------------------------| -| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | -| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. | +| Parameter | Type | Default value | Description | +|---------------|------------|----------------------------------------|---------------------------------------------| +| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | +| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. | # `object` section Contains object-service related parameters. ```yaml object: + put: + remote_pool_size: 100 get: priority: - $attribute:ClusterName @@ -508,29 +415,10 @@ object: | Parameter | Type | Default value | Description | |-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------| | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | +| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. 
| +| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | | `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. | - -# `rpc` section -Contains limits on the number of active RPC for specified method(s). - -```yaml -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - max_ops: 1000 - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 -``` - -| Parameter | Type | Default value | Description | -|------------------|------------|---------------|--------------------------------------------------------------| -| `limits.max_ops` | `int` | | Maximum number of active RPC allowed for the given method(s) | -| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit | - # `runtime` section Contains runtime parameters. @@ -583,20 +471,3 @@ multinet: | `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | | `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | | `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | - -# `qos` section -```yaml -qos: - critical: - authorized_keys: - - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 - - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 - internal: - authorized_keys: - - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 - - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 -``` -| Parameter | Type | Default value | Description | -| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- | -| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. | -| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. 
| diff --git a/go.mod b/go.mod index 6f1950936..8f4053872 100644 --- a/go.mod +++ b/go.mod @@ -1,18 +1,17 @@ module git.frostfs.info/TrueCloudLab/frostfs-node -go 1.23.0 +go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa + git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 @@ -28,7 +27,7 @@ require ( github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.15.0 + github.com/multiformats/go-multiaddr v0.12.1 github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 @@ -41,14 +40,15 @@ require ( github.com/ssgreg/journald v1.0.0 github.com/stretchr/testify v1.9.0 go.etcd.io/bbolt v1.3.10 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel/trace v1.28.0 go.uber.org/zap v1.27.0 - golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 - golang.org/x/term v0.30.0 - google.golang.org/grpc v1.69.2 - google.golang.org/protobuf v1.36.1 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.22.0 + golang.org/x/term v0.21.0 + google.golang.org/grpc v1.66.2 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 ) @@ -85,9 +85,9 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -119,18 +119,17 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect 
go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.4.0 // indirect + lukechampine.com/blake3 v1.2.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 5b075f60a..d63396202 100644 --- a/go.sum +++ b/go.sum @@ -1,25 +1,23 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467 h1:MH9uHZFZNyUCL+YChiDcVeXPjhTDcFDeoGr8Mc8NY9M= 
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= @@ -108,8 +106,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -145,14 +141,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= 
-github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -192,8 +188,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= -github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= +github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -294,22 +290,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric 
v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -324,15 +318,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -345,16 +339,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net 
v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -381,16 +375,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -398,26 +392,26 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 
h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -426,8 +420,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -445,7 +439,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/internal/assert/cond.go b/internal/assert/cond.go deleted file mode 100644 index 113d2eba9..000000000 --- a/internal/assert/cond.go +++ /dev/null @@ -1,29 +0,0 @@ -package assert - -import ( - "fmt" - "strings" -) - -func True(cond bool, details ...string) { - if !cond { - panic(strings.Join(details, " ")) - } -} - -func False(cond bool, details ...string) { - if cond { - panic(strings.Join(details, " ")) - } -} - -func NoError(err error, details ...string) { - if err != nil { - content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " ")) - panic(content) - } -} - -func Fail(details ...string) { - panic(strings.Join(details, " ")) -} diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 626372f43..b24f3593d 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -125,6 +125,7 @@ const ( SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" SearchLocalOperationFailed = "local operation failed" UtilObjectServiceError = "object service error" + UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" @@ -145,6 +146,7 @@ const ( ClientCantGetBlockchainHeight = "can't get blockchain height" ClientCantGetBlockchainHeight243 = "can't get blockchain height" EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" + EventCouldNotStartListenToEvents = "could not start listen to events" EventStopEventListenerByError = "stop event listener by error" EventStopEventListenerByContext = "stop event listener by context" 
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" @@ -198,7 +200,6 @@ const ( EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" EngineInterruptGettingLockers = "can't get object's lockers" EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" - EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones" EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" @@ -253,7 +254,6 @@ const ( ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" - ShardCouldNotFindObject = "could not find object" WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" @@ -384,6 +384,7 @@ const ( FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown" FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing" FrostFSNodeConfigurationReading = "configuration reading" + FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" FrostFSNodeTracingConfigationUpdated = "tracing configation updated" FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" FrostFSNodePoolConfigurationUpdate = "adjust pool configuration" @@ -511,11 +512,4 @@ const ( BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" WritecacheCantGetObject = "can't get an object from fstree" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" - FailedToParseIncomingIOTag = "failed to parse incoming IO tag" - NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" - FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" - FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" - WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" - FailedToUpdateNetmapCandidates = "update netmap candidates failed" - UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used" ) diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go index 9123541ff..cb165de69 100644 --- a/internal/metrics/consts.go +++ b/internal/metrics/consts.go @@ -23,7 +23,6 @@ const ( policerSubsystem = "policer" commonCacheSubsystem = "common_cache" multinetSubsystem = "multinet" - qosSubsystem = "qos" successLabel = "success" shardIDLabel = "shard_id" @@ -44,7 +43,6 @@ const ( hitLabel = "hit" cacheLabel = "cache" sourceIPLabel = "source_ip" - ioTagLabel = "io_tag" readWriteMode = "READ_WRITE" readOnlyMode = "READ_ONLY" diff --git a/internal/metrics/node.go b/internal/metrics/node.go index 8ade19eb2..4ea3c7c24 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -26,7 +26,6 @@ type NodeMetrics struct { morphCache *morphCacheMetrics log logger.LogMetrics 
multinet *multinetMetrics - qos *QoSMetrics // nolint: unused appInfo *ApplicationInfo } @@ -56,7 +55,6 @@ func NewNodeMetrics() *NodeMetrics { log: logger.NewLogMetrics(namespace), appInfo: NewApplicationInfo(misc.Version), multinet: newMultinetMetrics(namespace), - qos: newQoSMetrics(), } } @@ -128,7 +126,3 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics { func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { return m.multinet } - -func (m *NodeMetrics) QoSMetrics() *QoSMetrics { - return m.qos -} diff --git a/internal/metrics/object.go b/internal/metrics/object.go index e4f6dfde1..0ba994ed3 100644 --- a/internal/metrics/object.go +++ b/internal/metrics/object.go @@ -9,14 +9,13 @@ import ( ) type ObjectServiceMetrics interface { - AddRequestDuration(method string, d time.Duration, success bool, ioTag string) + AddRequestDuration(method string, d time.Duration, success bool) AddPayloadSize(method string, size int) } type objectServiceMetrics struct { - methodDuration *prometheus.HistogramVec - payloadCounter *prometheus.CounterVec - ioTagOpsCounter *prometheus.CounterVec + methodDuration *prometheus.HistogramVec + payloadCounter *prometheus.CounterVec } func newObjectServiceMetrics() *objectServiceMetrics { @@ -33,24 +32,14 @@ func newObjectServiceMetrics() *objectServiceMetrics { Name: "request_payload_bytes", Help: "Object Service request payload", }, []string{methodLabel}), - ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: objectSubsystem, - Name: "requests_total", - Help: "Count of requests for each IO tag", - }, []string{methodLabel, ioTagLabel}), } } -func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) { +func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) { m.methodDuration.With(prometheus.Labels{ methodLabel: method, successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) - m.ioTagOpsCounter.With(prometheus.Labels{ - ioTagLabel: ioTag, - methodLabel: method, - }).Inc() } func (m *objectServiceMetrics) AddPayloadSize(method string, size int) { diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go deleted file mode 100644 index be6878142..000000000 --- a/internal/metrics/qos.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" - "github.com/prometheus/client_golang/prometheus" -) - -type QoSMetrics struct { - opsCounter *prometheus.GaugeVec -} - -func newQoSMetrics() *QoSMetrics { - return &QoSMetrics{ - opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: qosSubsystem, - Name: "operations_total", - Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard", - }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), - } -} - -func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) { - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "pending", - }).Set(float64(pending)) - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "in_progress", - }).Set(float64(inProgress)) - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "completed", 
- }).Set(float64(completed)) - m.opsCounter.With(prometheus.Labels{ - shardIDLabel: shardID, - operationLabel: operation, - ioTagLabel: tag, - typeLabel: "resource_exhausted", - }).Set(float64(resourceExhausted)) -} - -func (m *QoSMetrics) Close(shardID string) { - m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) -} diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go index e192c4398..6702aa83c 100644 --- a/internal/metrics/treeservice.go +++ b/internal/metrics/treeservice.go @@ -12,14 +12,12 @@ type TreeMetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) - AddOperation(string, string) } type treeServiceMetrics struct { replicateTaskDuration *prometheus.HistogramVec replicateWaitDuration *prometheus.HistogramVec syncOpDuration *prometheus.HistogramVec - ioTagOpsCounter *prometheus.CounterVec } var _ TreeMetricsRegister = (*treeServiceMetrics)(nil) @@ -44,12 +42,6 @@ func newTreeServiceMetrics() *treeServiceMetrics { Name: "sync_duration_seconds", Help: "Duration of synchronization operations", }, []string{successLabel}), - ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: treeServiceSubsystem, - Name: "requests_total", - Help: "Count of requests for each IO tag", - }, []string{methodLabel, ioTagLabel}), } } @@ -70,10 +62,3 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) { successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) } - -func (m *treeServiceMetrics) AddOperation(op string, ioTag string) { - m.ioTagOpsCounter.With(prometheus.Labels{ - ioTagLabel: ioTag, - methodLabel: op, - }).Inc() -} diff --git a/internal/qos/config.go b/internal/qos/config.go deleted file mode 100644 index d90b403b5..000000000 --- a/internal/qos/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package qos - -import ( - "math" - "time" -) - -const ( - NoLimit int64 = math.MaxInt64 - DefaultIdleTimeout = 5 * time.Minute -) - -type LimiterConfig struct { - Read OpConfig - Write OpConfig -} - -type OpConfig struct { - MaxWaitingOps int64 - MaxRunningOps int64 - IdleTimeout time.Duration - Tags []IOTagConfig -} - -type IOTagConfig struct { - Tag string - Weight *float64 - LimitOps *float64 - ReservedOps *float64 - Prohibited bool -} diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go deleted file mode 100644 index 58cd9e52c..000000000 --- a/internal/qos/grpc.go +++ /dev/null @@ -1,86 +0,0 @@ -package qos - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "google.golang.org/grpc" -) - -func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String()) - return handler(ctx, req) - } -} - -func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - rawTag, ok := tagging.IOTagFromContext(ctx) - if !ok { - return invoker(ctx, method, req, reply, cc, opts...) 
- } - tag, err := FromRawString(rawTag) - if err != nil { - tag = IOTagClient - } - if tag.IsLocal() { - tag = IOTagInternal - } - ctx = tagging.ContextWithIOTag(ctx, tag.String()) - return invoker(ctx, method, req, reply, cc, opts...) - } -} - -func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor { - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - rawTag, ok := tagging.IOTagFromContext(ctx) - if !ok { - return streamer(ctx, desc, cc, method, opts...) - } - tag, err := FromRawString(rawTag) - if err != nil { - tag = IOTagClient - } - if tag.IsLocal() { - tag = IOTagInternal - } - ctx = tagging.ContextWithIOTag(ctx, tag.String()) - return streamer(ctx, desc, cc, method, opts...) - } -} - -func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() { - return handler(ctx, req) - } - - release, ok := getLimiter().Acquire(info.FullMethod) - if !ok { - return nil, new(apistatus.ResourceExhausted) - } - defer release() - - return handler(ctx, req) - } -} - -//nolint:contextcheck (grpc.ServerStream manages the context itself) -func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor { - return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() { - return handler(srv, ss) - } - - release, ok := getLimiter().Acquire(info.FullMethod) - if !ok { - return new(apistatus.ResourceExhausted) - } - defer release() - - return handler(srv, ss) - } -} diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go deleted file mode 100644 index 7d0826754..000000000 --- a/internal/qos/grpc_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package qos_test - -import ( - "context" - "errors" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -const ( - okKey = "ok" -) - -var ( - errTest = errors.New("mock") - errWrongTag = errors.New("wrong tag") - errNoTag = errors.New("failed to get tag from context") - errResExhausted *apistatus.ResourceExhausted - tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} -) - -type mockGRPCServerStream struct { - grpc.ServerStream - - ctx context.Context -} - -func (m *mockGRPCServerStream) Context() context.Context { - return m.ctx -} - -type limiter struct { - acquired bool - released bool -} - -func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { - l.acquired = true - if key != okKey { - return nil, false - } - return func() { l.released = true }, true -} - -func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { - interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) - handler := func(ctx context.Context, req any) (any, error) { - return nil, errTest - } 
- _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) - return err -} - -func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { - interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) - handler := func(srv any, stream grpc.ServerStream) error { - return errTest - } - err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ - FullMethod: methodName, - }, handler) - return err -} - -func Test_MaxActiveRPCLimiter(t *testing.T) { - // UnaryServerInterceptor - t.Run("unary fail", func(t *testing.T) { - var lim limiter - - err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") - require.ErrorAs(t, err, &errResExhausted) - require.True(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("unary pass critical", func(t *testing.T) { - var lim limiter - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - - err := unaryMaxActiveRPCLimiter(ctx, &lim, "") - require.ErrorIs(t, err, errTest) - require.False(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("unary pass", func(t *testing.T) { - var lim limiter - - err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) - require.ErrorIs(t, err, errTest) - require.True(t, lim.acquired) - require.True(t, lim.released) - }) - // StreamServerInterceptor - t.Run("stream fail", func(t *testing.T) { - var lim limiter - - err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") - require.ErrorAs(t, err, &errResExhausted) - require.True(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("stream pass critical", func(t *testing.T) { - var lim limiter - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - - err := streamMaxActiveRPCLimiter(ctx, &lim, "") - require.ErrorIs(t, err, errTest) - require.False(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("stream pass", func(t *testing.T) { - var lim limiter - - err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) - require.ErrorIs(t, err, errTest) - require.True(t, lim.acquired) - require.True(t, lim.released) - }) -} - -func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { - interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() - called := false - handler := func(ctx context.Context, req any) (any, error) { - called = true - if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { - return nil, nil - } - return nil, errWrongTag - } - _, err := interceptor(context.Background(), nil, nil, handler) - require.NoError(t, err) - require.True(t, called) -} - -func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { - interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() - - // check context with no value - called := false - invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - called = true - if _, ok := tagging.IOTagFromContext(ctx); ok { - return fmt.Errorf("%v: expected no IO tags", errWrongTag) - } - return nil - } - require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) - require.True(t, called) - - // check context for internal tag - targetTag := qos.IOTagInternal.String() - invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - raw, ok := tagging.IOTagFromContext(ctx) - if !ok { 
- return errNoTag - } - if raw != targetTag { - return errWrongTag - } - return nil - } - for _, tag := range tags { - ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) - require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) - } - - // check context for client tag - ctx := tagging.ContextWithIOTag(context.Background(), "") - targetTag = qos.IOTagClient.String() - require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) -} - -func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { - interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() - - // check context with no value - called := false - streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - called = true - if _, ok := tagging.IOTagFromContext(ctx); ok { - return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) - } - return nil, nil - } - _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) - require.True(t, called) - require.NoError(t, err) - - // check context for internal tag - targetTag := qos.IOTagInternal.String() - streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - raw, ok := tagging.IOTagFromContext(ctx) - if !ok { - return nil, errNoTag - } - if raw != targetTag { - return nil, errWrongTag - } - return nil, nil - } - for _, tag := range tags { - ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) - _, err := interceptor(ctx, nil, nil, "", streamer, nil) - require.NoError(t, err) - } - - // check context for client tag - ctx := tagging.ContextWithIOTag(context.Background(), "") - targetTag = qos.IOTagClient.String() - _, err = interceptor(ctx, nil, nil, "", streamer, nil) - require.NoError(t, err) -} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go deleted file mode 100644 index 2d7de32fc..000000000 --- a/internal/qos/limiter.go +++ /dev/null @@ -1,246 +0,0 @@ -package qos - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" -) - -const ( - defaultIdleTimeout time.Duration = 0 - defaultShare float64 = 1.0 - minusOne = ^uint64(0) - - defaultMetricsCollectTimeout = 5 * time.Second -) - -type ReleaseFunc scheduling.ReleaseFunc - -type Limiter interface { - ReadRequest(context.Context) (ReleaseFunc, error) - WriteRequest(context.Context) (ReleaseFunc, error) - SetParentID(string) - SetMetrics(Metrics) - Close() -} - -type scheduler interface { - RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error) - Close() -} - -func NewLimiter(c LimiterConfig) (Limiter, error) { - if err := c.Validate(); err != nil { - return nil, err - } - readScheduler, err := createScheduler(c.Read) - if err != nil { - return nil, fmt.Errorf("create read scheduler: %w", err) - } - writeScheduler, err := createScheduler(c.Write) - if err != nil { - return nil, fmt.Errorf("create write scheduler: %w", err) - } - l := &mClockLimiter{ - readScheduler: readScheduler, - writeScheduler: writeScheduler, - closeCh: make(chan struct{}), - wg: &sync.WaitGroup{}, - readStats: createStats(), - writeStats: createStats(), - } - l.shardID.Store(&shardID{}) - l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}}) - 
l.startMetricsCollect() - return l, nil -} - -func createScheduler(config OpConfig) (scheduler, error) { - if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { - return newSemaphoreScheduler(config.MaxRunningOps), nil - } - return scheduling.NewMClock( - uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps), - converToSchedulingTags(config.Tags), config.IdleTimeout) -} - -func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo { - result := make(map[string]scheduling.TagInfo) - for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { - result[tag.String()] = scheduling.TagInfo{ - Share: defaultShare, - } - } - for _, l := range limits { - v := result[l.Tag] - if l.Weight != nil && *l.Weight != 0 { - v.Share = *l.Weight - } - if l.LimitOps != nil && *l.LimitOps != 0 { - v.LimitIOPS = l.LimitOps - } - if l.ReservedOps != nil && *l.ReservedOps != 0 { - v.ReservedIOPS = l.ReservedOps - } - v.Prohibited = l.Prohibited - result[l.Tag] = v - } - return result -} - -var ( - _ Limiter = (*noopLimiter)(nil) - releaseStub ReleaseFunc = func() {} - noopLimiterInstance = &noopLimiter{} -) - -func NewNoopLimiter() Limiter { - return noopLimiterInstance -} - -type noopLimiter struct{} - -func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) { - return releaseStub, nil -} - -func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { - return releaseStub, nil -} - -func (n *noopLimiter) SetParentID(string) {} - -func (n *noopLimiter) Close() {} - -func (n *noopLimiter) SetMetrics(Metrics) {} - -var _ Limiter = (*mClockLimiter)(nil) - -type shardID struct { - id string -} - -type mClockLimiter struct { - readScheduler scheduler - writeScheduler scheduler - - readStats map[string]*stat - writeStats map[string]*stat - - shardID atomic.Pointer[shardID] - metrics atomic.Pointer[metricsHolder] - closeCh chan struct{} - wg *sync.WaitGroup -} - -func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { - return requestArrival(ctx, n.readScheduler, n.readStats) -} - -func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { - return requestArrival(ctx, n.writeScheduler, n.writeStats) -} - -func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - tag, ok := tagging.IOTagFromContext(ctx) - if !ok { - tag = IOTagClient.String() - } - stat := getStat(tag, stats) - stat.pending.Add(1) - if tag == IOTagCritical.String() { - stat.inProgress.Add(1) - return func() { - stat.completed.Add(1) - }, nil - } - rel, err := s.RequestArrival(ctx, tag) - stat.inProgress.Add(1) - if err != nil { - if isResourceExhaustedErr(err) { - stat.resourceExhausted.Add(1) - return nil, &apistatus.ResourceExhausted{} - } - stat.completed.Add(1) - return nil, err - } - return func() { - rel() - stat.completed.Add(1) - }, nil -} - -func (n *mClockLimiter) Close() { - n.readScheduler.Close() - n.writeScheduler.Close() - close(n.closeCh) - n.wg.Wait() - n.metrics.Load().metrics.Close(n.shardID.Load().id) -} - -func (n *mClockLimiter) SetParentID(parentID string) { - n.shardID.Store(&shardID{id: parentID}) -} - -func (n *mClockLimiter) SetMetrics(m Metrics) { - n.metrics.Store(&metricsHolder{metrics: m}) -} - -func (n *mClockLimiter) startMetricsCollect() { - n.wg.Add(1) - go func() { - defer n.wg.Done() - - ticker := 
time.NewTicker(defaultMetricsCollectTimeout) - defer ticker.Stop() - for { - select { - case <-n.closeCh: - return - case <-ticker.C: - shardID := n.shardID.Load().id - if shardID == "" { - continue - } - metrics := n.metrics.Load().metrics - exportMetrics(metrics, n.readStats, shardID, "read") - exportMetrics(metrics, n.writeStats, shardID, "write") - } - } - }() -} - -func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) { - var pending uint64 - var inProgress uint64 - var completed uint64 - var resExh uint64 - for tag, s := range stats { - pending = s.pending.Load() - inProgress = s.inProgress.Load() - completed = s.completed.Load() - resExh = s.resourceExhausted.Load() - if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 { - continue - } - metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) - } -} - -func isResourceExhaustedErr(err error) bool { - return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || - errors.Is(err, errSemaphoreLimitExceeded) || - errors.Is(err, scheduling.ErrTagRequestsProhibited) -} diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go deleted file mode 100644 index c00da51b7..000000000 --- a/internal/qos/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -package qos - -import "sync/atomic" - -type Metrics interface { - SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) - Close(shardID string) -} - -var _ Metrics = (*noopMetrics)(nil) - -type noopMetrics struct{} - -func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) { -} - -func (n *noopMetrics) Close(string) {} - -// stat represents the limiter's statistics as cumulative counters. - // -// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`.
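
The lifecycle described in the comment above can be exercised in isolation. Below is a minimal, self-contained sketch of the same cumulative-counter scheme; the names mirror the deleted code, but the types here are standalone stand-ins, not the node's own:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters follows the scheme above: every operation bumps pending and
// inProgress, then exactly one of completed or resourceExhausted.
type counters struct {
	pending, inProgress, completed, resourceExhausted atomic.Uint64
}

// track records one operation; limited marks it as rejected by the scheduler.
func (c *counters) track(limited bool) {
	c.pending.Add(1)
	c.inProgress.Add(1)
	if limited {
		c.resourceExhausted.Add(1)
		return
	}
	c.completed.Add(1)
}

func main() {
	var c counters
	c.track(false)
	c.track(true)
	fmt.Println(c.pending.Load(), c.inProgress.Load(), c.completed.Load(), c.resourceExhausted.Load()) // 2 2 1 1
}

Because the counters only ever grow, a metrics exporter can read them at any time without coordinating with in-flight operations, which is why plain atomics suffice here.
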
-type stat struct { - completed atomic.Uint64 - pending atomic.Uint64 - resourceExhausted atomic.Uint64 - inProgress atomic.Uint64 -} - -type metricsHolder struct { - metrics Metrics -} diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go deleted file mode 100644 index 74e6928f3..000000000 --- a/internal/qos/semaphore.go +++ /dev/null @@ -1,39 +0,0 @@ -package qos - -import ( - "context" - "errors" - - qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" - "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" -) - -var ( - _ scheduler = (*semaphore)(nil) - errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded") -) - -type semaphore struct { - s *qosSemaphore.Semaphore -} - -func newSemaphoreScheduler(size int64) *semaphore { - return &semaphore{ - s: qosSemaphore.NewSemaphore(size), - } -} - -func (s *semaphore) Close() {} - -func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - if s.s.Acquire() { - return s.s.Release, nil - } - return nil, errSemaphoreLimitExceeded -} diff --git a/internal/qos/stats.go b/internal/qos/stats.go deleted file mode 100644 index 3ecfad9f9..000000000 --- a/internal/qos/stats.go +++ /dev/null @@ -1,29 +0,0 @@ -package qos - -const unknownStatsTag = "unknown" - -var statTags = map[string]struct{}{ - IOTagBackground.String(): {}, - IOTagClient.String(): {}, - IOTagCritical.String(): {}, - IOTagInternal.String(): {}, - IOTagPolicer.String(): {}, - IOTagTreeSync.String(): {}, - IOTagWritecache.String(): {}, - unknownStatsTag: {}, -} - -func createStats() map[string]*stat { - result := make(map[string]*stat) - for tag := range statTags { - result[tag] = &stat{} - } - return result -} - -func getStat(tag string, stats map[string]*stat) *stat { - if v, ok := stats[tag]; ok { - return v - } - return stats[unknownStatsTag] -} diff --git a/internal/qos/tags.go b/internal/qos/tags.go deleted file mode 100644 index e3f7cafd6..000000000 --- a/internal/qos/tags.go +++ /dev/null @@ -1,59 +0,0 @@ -package qos - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" -) - -type IOTag string - -const ( - IOTagBackground IOTag = "background" - IOTagClient IOTag = "client" - IOTagCritical IOTag = "critical" - IOTagInternal IOTag = "internal" - IOTagPolicer IOTag = "policer" - IOTagTreeSync IOTag = "treesync" - IOTagWritecache IOTag = "writecache" - - ioTagUnknown IOTag = "" -) - -func FromRawString(s string) (IOTag, error) { - switch s { - case string(IOTagBackground): - return IOTagBackground, nil - case string(IOTagClient): - return IOTagClient, nil - case string(IOTagCritical): - return IOTagCritical, nil - case string(IOTagInternal): - return IOTagInternal, nil - case string(IOTagPolicer): - return IOTagPolicer, nil - case string(IOTagTreeSync): - return IOTagTreeSync, nil - case string(IOTagWritecache): - return IOTagWritecache, nil - default: - return ioTagUnknown, fmt.Errorf("unknown tag %s", s) - } -} - -func (t IOTag) String() string { - return string(t) -} - -func IOTagFromContext(ctx context.Context) string { - tag, ok := tagging.IOTagFromContext(ctx) - if !ok { - tag = "undefined" - } - return tag -} - -func (t IOTag) IsLocal() bool { - return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync -} diff --git a/internal/qos/validate.go b/internal/qos/validate.go deleted file mode 100644 index 70f1f24e8..000000000 --- 
a/internal/qos/validate.go +++ /dev/null @@ -1,91 +0,0 @@ -package qos - -import ( - "errors" - "fmt" - "math" -) - -var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") - -type tagConfig struct { - Shares, Limit, Reserved *float64 -} - -func (c *LimiterConfig) Validate() error { - if err := validateOpConfig(c.Read); err != nil { - return fmt.Errorf("limits 'read' section validation error: %w", err) - } - if err := validateOpConfig(c.Write); err != nil { - return fmt.Errorf("limits 'write' section validation error: %w", err) - } - return nil -} - -func validateOpConfig(c OpConfig) error { - if c.MaxRunningOps <= 0 { - return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) - } - if c.MaxWaitingOps <= 0 { - return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps) - } - if c.IdleTimeout <= 0 { - return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String()) - } - if err := validateTags(c.Tags); err != nil { - return fmt.Errorf("'tags' config section validation error: %w", err) - } - return nil -} - -func validateTags(configTags []IOTagConfig) error { - tags := map[IOTag]tagConfig{ - IOTagBackground: {}, - IOTagClient: {}, - IOTagInternal: {}, - IOTagPolicer: {}, - IOTagTreeSync: {}, - IOTagWritecache: {}, - } - for _, t := range configTags { - tag, err := FromRawString(t.Tag) - if err != nil { - return fmt.Errorf("invalid tag %s: %w", t.Tag, err) - } - if _, ok := tags[tag]; !ok { - return fmt.Errorf("tag %s is not configurable", t.Tag) - } - tags[tag] = tagConfig{ - Shares: t.Weight, - Limit: t.LimitOps, - Reserved: t.ReservedOps, - } - } - idx := 0 - var shares float64 - for t, v := range tags { - if idx == 0 { - idx++ - shares = float64Value(v.Shares) - } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) { - return errWeightsMustBeSpecified - } - if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) { - return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String()) - } - if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) { - return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String()) - } - if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) { - return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String()) - } - } - return nil -} - -func float64Value(f *float64) float64 { - if f == nil { - return 0.0 - } - return *f -} diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go index 8cbb1cce9..953b91a79 100644 --- a/pkg/ape/contract_storage/proxy.go +++ b/pkg/ape/contract_storage/proxy.go @@ -31,7 +31,9 @@ type RPCActorProvider interface { type ProxyVerificationContractStorage struct { rpcActorProvider RPCActorProvider - cosigners []actor.SignerAccount + acc *wallet.Account + + proxyScriptHash util.Uint160 policyScriptHash util.Uint160 } @@ -39,27 +41,12 @@ type ProxyVerificationContractStorage struct { var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil) func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage { - acc := wallet.NewAccountFromPrivateKey(key) return &ProxyVerificationContractStorage{ rpcActorProvider: rpcActorProvider, - cosigners: 
[]actor.SignerAccount{ - { - Signer: transaction.Signer{ - Account: proxyScriptHash, - Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{policyScriptHash}, - }, - Account: notary.FakeContractAccount(proxyScriptHash), - }, - { - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CalledByEntry, - }, - Account: acc, - }, - }, + acc: wallet.NewAccountFromPrivateKey(key), + + proxyScriptHash: proxyScriptHash, policyScriptHash: policyScriptHash, } @@ -77,7 +64,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke { func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) { rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - act, err := actor.New(rpcActor, contractStorage.cosigners) + act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash)) if err != nil { return nil, err } @@ -111,16 +98,31 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na // ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as cosigners. func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { - rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor} - return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) + // contractStorageActor is reconstructed on each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but + ProxyVerificationContractStorage does not manage reconnections. + contractStorageActor, err := contractStorage.newContractStorageActor() + if err != nil { + return nil, err + } + return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) } -type invokerAdapter struct { - *invoker.Invoker - rpcInvoker invoker.RPCInvoke -} - -func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { - return n.rpcInvoker +func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount { + return []actor.SignerAccount{ + { + Signer: transaction.Signer{ + Account: proxyScriptHash, + Scopes: transaction.CustomContracts, + AllowedContracts: []util.Uint160{policyScriptHash}, + }, + Account: notary.FakeContractAccount(proxyScriptHash), + }, + { + Signer: transaction.Signer{ + Account: acc.Contract.ScriptHash(), + Scopes: transaction.CalledByEntry, + }, + Account: acc, + }, + } } diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go index d32bd4a07..c0413678d 100644 --- a/pkg/ape/request/frostfsid.go +++ b/pkg/ape/request/frostfsid.go @@ -1,7 +1,6 @@ package request import ( - "context" "fmt" "strconv" "strings" @@ -13,9 +12,9 @@ import ( ) // FormFrostfsIDRequestProperties forms frostfsid-specific request properties like user-claim tags and group ID.
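
The comment above summarizes the shape of these properties; before the function itself, here is a self-contained sketch of one plausible flattening of a subject into a property map. The subject type, the "tag:" prefix, and the group key are illustrative assumptions for the sketch, not the node's exact names:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// subject stands in for the frostfsid extended subject; the real structure
// lives in the frostfsid contract client.
type subject struct {
	KV     map[string]string // user-claim tags
	Groups []int64           // group IDs
}

// requestProperties flattens a subject into request properties: one entry
// per user-claim tag plus a joined list of group IDs under a single key.
func requestProperties(s subject) map[string]string {
	props := make(map[string]string)
	for k, v := range s.KV {
		props["tag:"+k] = v // hypothetical key format
	}
	ids := make([]string, 0, len(s.Groups))
	for _, g := range s.Groups {
		ids = append(ids, strconv.FormatInt(g, 10))
	}
	props["$actor:groups"] = strings.Join(ids, ",") // hypothetical key
	return props
}

func main() {
	fmt.Println(requestProperties(subject{KV: map[string]string{"team": "dev"}, Groups: []int64{1, 7}}))
}
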
-func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { +func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { reqProps := make(map[string]string) - subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) + subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) @@ -37,8 +36,8 @@ } // Groups returns the actor's group IDs from the frostfsid contract. -func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { - subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) +func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { + subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go index 91ee5c6c3..d4bc0cf68 100644 --- a/pkg/core/client/util.go +++ b/pkg/core/client/util.go @@ -3,7 +3,6 @@ package client import ( "bytes" "fmt" - "iter" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -20,7 +19,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro // Args must not be nil. func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { PublicKey() []byte - Addresses() iter.Seq[string] + IterateAddresses(func(string) bool) NumberOfAddresses() int ExternalAddresses() []string }, diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go index 1c52d93e7..62cc21553 100644 --- a/pkg/core/container/info.go +++ b/pkg/core/container/info.go @@ -1,7 +1,6 @@ package container import ( - "context" "sync" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" @@ -20,7 +19,7 @@ type infoValue struct { } type InfoProvider interface { - Info(ctx context.Context, id cid.ID) (Info, error) + Info(id cid.ID) (Info, error) } type infoProvider struct { @@ -44,13 +43,13 @@ func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider { } } -func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { +func (r *infoProvider) Info(id cid.ID) (Info, error) { v, found := r.tryGetFromCache(id) if found { return v.info, v.err } - return r.getFromSource(ctx, id) + return r.getFromSource(id) } func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { @@ -61,7 +60,7 @@ func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { return value, found } -func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { +func (r *infoProvider) getFromSource(id cid.ID) (Info, error) { r.kl.Lock(id) defer r.kl.Unlock(id) @@ -76,11 +75,11 @@ func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, erro return Info{}, r.sourceErr } - cnr, err := r.source.Get(ctx, id) + cnr, err := r.source.Get(id) var civ infoValue if err != nil { if client.IsErrContainerNotFound(err) { - removed, err := WasRemoved(ctx, r.source, id) + removed, err := WasRemoved(r.source, id) if err != nil
{ civ.err = err } else { diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go index 4eb14e53c..ba4404546 100644 --- a/pkg/core/container/storage.go +++ b/pkg/core/container/storage.go @@ -1,8 +1,6 @@ package container import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -43,9 +41,9 @@ type Source interface { // // Implementations must not retain the container pointer and modify // the container through it. - Get(ctx context.Context, cid cid.ID) (*Container, error) + Get(cid.ID) (*Container, error) - DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) + DeletionInfo(cid.ID) (*DelInfo, error) } // EACL groups information about the FrostFS container's extended ACL stored in diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index 61c568052..d27556807 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -1,7 +1,6 @@ package container import ( - "context" "errors" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -11,8 +10,8 @@ import ( // WasRemoved checks whether the container ever existed or // it just has not been created yet at the current epoch. -func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { - _, err := s.DeletionInfo(ctx, cid) +func WasRemoved(s Source, cid cid.ID) (bool, error) { + _, err := s.DeletionInfo(cid) if err == nil { return true, nil } @@ -26,10 +25,10 @@ func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { // IsIndexedContainer returns True if container attributes should be indexed. func IsIndexedContainer(cnr containerSDK.Container) bool { var isS3Container bool - for key := range cnr.Attributes() { + cnr.IterateAttributes(func(key, _ string) { if key == ".s3-location-constraint" { isS3Container = true } - } + }) return !isS3Container } diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go index e752043d3..ecfd0eb15 100644 --- a/pkg/core/frostfsid/subject_provider.go +++ b/pkg/core/frostfsid/subject_provider.go @@ -1,8 +1,6 @@ package frostfsid import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -13,6 +11,6 @@ const ( // SubjectProvider interface provides methods to get subject from FrostfsID contract. type SubjectProvider interface { - GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) - GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) + GetSubject(util.Uint160) (*client.Subject, error) + GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error) } diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index e58e42634..b0c9e1f9e 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -1,10 +1,6 @@ package netmap -import ( - "iter" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) +import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" // Node is a named type of netmap.NodeInfo which provides interface needed // in the current repository. Node is expected to be used everywhere instead @@ -18,20 +14,10 @@ func (x Node) PublicKey() []byte { return (netmap.NodeInfo)(x).PublicKey() } -// Addresses returns an iterator over all announced network addresses. 
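
The deleted Addresses/IterateAddresses pair just below is a small, reusable adapter between Go 1.23 range-over-func iterators and the legacy callback style, where a callback returning true stops iteration. A standalone sketch of the same bridge (requires Go 1.23+ for the iter package; the address strings are made up):

package main

import (
	"fmt"
	"iter"
)

// addresses returns an iterator in the style of the Addresses method below.
func addresses() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, a := range []string{"/dns4/node1/tcp/8080", "/dns4/node2/tcp/8080"} {
			if !yield(a) {
				return
			}
		}
	}
}

// iterateAddresses bridges the iterator to the legacy callback convention:
// the callback returning true stops iteration.
func iterateAddresses(f func(string) bool) {
	for s := range addresses() {
		if f(s) {
			return
		}
	}
}

func main() {
	iterateAddresses(func(s string) bool {
		fmt.Println(s)
		return false // continue
	})
}

Note the inverted conventions: yield returns false to stop, while the legacy callback returns true to stop, which is exactly why the bridge needs the if-return in the loop body.
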
-func (x Node) Addresses() iter.Seq[string] { - return (netmap.NodeInfo)(x).NetworkEndpoints() -} - // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. -// Deprecated: use [Node.Addresses] instead. func (x Node) IterateAddresses(f func(string) bool) { - for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { - if f(s) { - return - } - } + (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) } // NumberOfAddresses returns number of announced network addresses. diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go index 97313da84..7770c61c7 100644 --- a/pkg/core/netmap/storage.go +++ b/pkg/core/netmap/storage.go @@ -1,8 +1,6 @@ package netmap import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -18,7 +16,7 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) + GetNetMap(diff uint64) (*netmap.NetMap, error) // GetNetMapByEpoch reads network map by the epoch number from the storage. // It returns the pointer to the requested network map and any error encountered. @@ -27,21 +25,21 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) + GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) // Epoch reads the current epoch from the storage. // It returns the number of the current epoch and any error encountered. // // Must return exactly one non-default value. - Epoch(ctx context.Context) (uint64, error) + Epoch() (uint64, error) } // GetLatestNetworkMap requests and returns the latest network map from the storage. -func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { - return src.GetNetMap(ctx, 0) +func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) { + return src.GetNetMap(0) } // GetPreviousNetworkMap requests and returns the network map preceding the latest one from the storage.
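
For reference, the diff parameter of GetNetMap above counts epochs back from the current one, which is why the two helpers here pass 0 and 1, and why the test source later in this patch resolves GetNetMap(diff) to GetNetMapByEpoch(currentEpoch - diff). A toy illustration of that arithmetic (a string stands in for *netmap.NetMap):

package main

import "fmt"

// netMapSource shows the diff semantics of Source.GetNetMap: diff is the
// number of epochs back from the current one, so GetNetMap(0) is the
// latest map and GetNetMap(1) the previous one.
type netMapSource struct {
	current uint64
	byEpoch map[uint64]string // epoch -> network map
}

func (s netMapSource) GetNetMap(diff uint64) (string, error) {
	if diff >= s.current {
		return "", fmt.Errorf("invalid diff %d", diff)
	}
	return s.byEpoch[s.current-diff], nil
}

func main() {
	src := netMapSource{current: 5, byEpoch: map[uint64]string{5: "latest", 4: "previous"}}
	fmt.Println(src.GetNetMap(0)) // latest
	fmt.Println(src.GetNetMap(1)) // previous
}
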
-func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { - return src.GetNetMap(ctx, 1) +func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) { + return src.GetNetMap(1) } diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index cf090eb37..19b5d34e4 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -199,7 +199,7 @@ func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSD cnrIDBin := make([]byte, sha256.Size) cnrID.Encode(cnrIDBin) - cnr, err := v.containers.Get(ctx, cnrID) + cnr, err := v.containers.Get(cnrID) if err != nil { return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) } diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index dc336eb34..20560cf3a 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -411,11 +410,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ + &testNetmapSource{ + netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, }, - CurrentEpoch: curEpoch, + currentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -484,12 +483,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ + &testNetmapSource{ + netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - CurrentEpoch: curEpoch, + currentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -560,12 +559,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ + &testNetmapSource{ + netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - CurrentEpoch: curEpoch, + currentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -579,7 +578,7 @@ type testIRSource struct { irNodes [][]byte } -func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) { +func (s *testIRSource) InnerRingKeys() ([][]byte, error) { return s.irNodes, nil } @@ -587,13 +586,36 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { return nil, nil } + +type testNetmapSource struct { + netmaps map[uint64]*netmap.NetMap + currentEpoch uint64 +} + +func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) { + if diff >= 
s.currentEpoch { + return nil, fmt.Errorf("invalid diff") + } + return s.GetNetMapByEpoch(s.currentEpoch - diff) +} + +func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { + if nm, found := s.netmaps[epoch]; found { + return nm, nil + } + return nil, fmt.Errorf("netmap not found") +} + +func (s *testNetmapSource) Epoch() (uint64, error) { + return s.currentEpoch, nil +} diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go index aab12ebf9..67c9a3188 100644 --- a/pkg/core/object/info.go +++ b/pkg/core/object/info.go @@ -13,13 +13,6 @@ type ECInfo struct { Total uint32 } -func (v *ECInfo) String() string { - if v == nil { - return "" - } - return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) -} - // Info groups object address with its FrostFS // object info. type Info struct { @@ -30,5 +23,5 @@ type Info struct { } func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) + return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject) } diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index 3733ed507..a1a5fcac1 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -18,7 +18,7 @@ import ( ) type InnerRing interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) + InnerRingKeys() ([][]byte, error) } type SenderClassifier struct { @@ -63,7 +63,7 @@ func (c SenderClassifier) Classify( } func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { - isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes) + isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) if err != nil { // do not throw error, try best case matching c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, @@ -78,7 +78,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK binCnr := make([]byte, sha256.Size) idCnr.Encode(binCnr) - isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr) + isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr) if err != nil { // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so @@ -99,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK }, nil } -func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) { - innerRingKeys, err := c.innerRing.InnerRingKeys(ctx) +func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { + innerRingKeys, err := c.innerRing.InnerRingKeys() if err != nil { return false, err } @@ -116,11 +116,10 @@ func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (boo } func (c SenderClassifier) isContainerKey( - ctx context.Context, owner, idCnr []byte, cnr container.Container, ) (bool, error) { - nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap + nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap if err != nil { return false, err } @@ -134,7 +133,7 @@ func (c SenderClassifier) isContainerKey( // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = core.GetPreviousNetworkMap(ctx, c.netmap) + nm, err = 
core.GetPreviousNetworkMap(c.netmap) if err != nil { return false, err } diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go index 7deec3f31..4a80ebf3b 100644 --- a/pkg/innerring/fetcher.go +++ b/pkg/innerring/fetcher.go @@ -1,8 +1,6 @@ package innerring import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -49,12 +47,12 @@ type IrFetcherWithoutNotary struct { // InnerRingKeys fetches list of innerring keys from NeoFSAlphabet // role in the sidechain. -func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { - return fN.cli.NeoFSAlphabetList(ctx) +func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) { + return fN.cli.NeoFSAlphabetList() } // InnerRingKeys fetches list of innerring keys from netmap contract // in the sidechain. -func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { - return f.nm.GetInnerRingList(ctx) +func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) { + return f.nm.GetInnerRingList() } diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go index 439400bac..45135a57b 100644 --- a/pkg/innerring/indexer.go +++ b/pkg/innerring/indexer.go @@ -1,7 +1,6 @@ package innerring import ( - "context" "fmt" "sync" "time" @@ -11,7 +10,7 @@ import ( type ( irFetcher interface { - InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) + InnerRingKeys() (keys.PublicKeys, error) } committeeFetcher interface { @@ -46,7 +45,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK } } -func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) { +func (s *innerRingIndexer) update() (ind indexes, err error) { s.RLock() if time.Since(s.lastAccess) < s.timeout { @@ -63,7 +62,7 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) return s.ind, nil } - innerRing, err := s.irFetcher.InnerRingKeys(ctx) + innerRing, err := s.irFetcher.InnerRingKeys() if err != nil { return indexes{}, err } @@ -82,8 +81,8 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) return s.ind, nil } -func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { - ind, err := s.update(ctx) +func (s *innerRingIndexer) InnerRingIndex() (int32, error) { + ind, err := s.update() if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -91,8 +90,8 @@ func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { return ind.innerRingIndex, nil } -func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { - ind, err := s.update(ctx) +func (s *innerRingIndexer) InnerRingSize() (int32, error) { + ind, err := s.update() if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -100,8 +99,8 @@ func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { return ind.innerRingSize, nil } -func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) { - ind, err := s.update(ctx) +func (s *innerRingIndexer) AlphabetIndex() (int32, error) { + ind, err := s.update() if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go index f8201b7df..c8a819b5b 100644 --- a/pkg/innerring/indexer_test.go +++ 
b/pkg/innerring/indexer_test.go @@ -1,7 +1,6 @@ package innerring import ( - "context" "fmt" "sync/atomic" "testing" @@ -38,15 +37,15 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex(context.Background()) + idx, err := indexer.AlphabetIndex() require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(2), idx, "invalid IR index") - size, err := indexer.InnerRingSize(context.Background()) + size, err := indexer.InnerRingSize() require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(3), size, "invalid IR size") }) @@ -57,11 +56,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex(context.Background()) + idx, err := indexer.AlphabetIndex() require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(0), idx, "invalid IR index") }) @@ -72,11 +71,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex(context.Background()) + idx, err := indexer.AlphabetIndex() require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") }) @@ -101,30 +100,30 @@ func TestIndexerCachesIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex(context.Background()) + idx, err := indexer.AlphabetIndex() require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err := indexer.InnerRingSize(context.Background()) + size, err := indexer.InnerRingSize() require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") - idx, err = indexer.AlphabetIndex(context.Background()) + idx, err = indexer.AlphabetIndex() require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize(context.Background()) + size, err = indexer.InnerRingSize() require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -133,15 +132,15 @@ func TestIndexerCachesIndexes(t *testing.T) { time.Sleep(2 * time.Second) - idx, err = 
indexer.AlphabetIndex(context.Background()) + idx, err = indexer.AlphabetIndex() require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize(context.Background()) + size, err = indexer.InnerRingSize() require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -166,15 +165,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex(context.Background()) + idx, err := indexer.AlphabetIndex() require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err := indexer.InnerRingSize(context.Background()) + size, err := indexer.InnerRingSize() require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") @@ -190,15 +189,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer = newInnerRingIndexer(cf, irf, key, time.Second) - idx, err = indexer.AlphabetIndex(context.Background()) + idx, err = indexer.AlphabetIndex() require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex(context.Background()) + idx, err = indexer.InnerRingIndex() require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err = indexer.InnerRingSize(context.Background()) + size, err = indexer.InnerRingSize() require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") } @@ -220,7 +219,7 @@ type testIRFetcher struct { calls atomic.Int32 } -func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { f.calls.Add(1) return f.keys, f.err } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 3d236641e..ecaf8ae86 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -38,7 +38,10 @@ import ( func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, alphaSync event.Handler, ) error { - locodeValidator := s.newLocodeValidator(cfg) + locodeValidator, err := s.newLocodeValidator(cfg) + if err != nil { + return err + } netSettings := (*networkSettings)(s.netmapClient) @@ -48,9 +51,8 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, poolSize := cfg.GetInt("workers.netmap") s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) - var err error s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), @@ -159,7 +161,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli } else { // create 
governance processor governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, FrostFSClient: frostfsCli, AlphabetState: s, @@ -225,7 +227,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, @@ -247,7 +249,7 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, AlphabetState: s, @@ -268,7 +270,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, FrostFSClient: frostfsCli, @@ -291,7 +293,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, @@ -342,7 +344,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, controlsrv.WithAllowedKeys(authKeys), - ), log.WithTag(logger.TagGrpcSvc), audit) + ), log, audit) grpcControlSrv := grpc.NewServer() control.RegisterControlServiceServer(grpcControlSrv, controlSvc) @@ -458,7 +460,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- } morphChain := &chainParams{ - log: s.log.WithTag(logger.TagMorph), + log: s.log, cfg: cfg, key: s.key, name: morphPrefix, diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 3a5137261..0b9e83443 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan ) (*Server, error) { var err error server := &Server{ - log: log.WithTag(logger.TagIr), + log: log, irMetrics: metrics, cmode: cmode, } @@ -575,19 +575,19 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe func (s *Server) initConfigFromBlockchain(ctx context.Context) error { // get current epoch - epoch, err := s.netmapClient.Epoch(ctx) + epoch, err := s.netmapClient.Epoch() if err != nil { return fmt.Errorf("can't read epoch number: %w", err) } // get current epoch duration - epochDuration, err := s.netmapClient.EpochDuration(ctx) + epochDuration, err := s.netmapClient.EpochDuration() if err != nil { return fmt.Errorf("can't read epoch duration: %w", err) } // get balance precision - balancePrecision, err := s.balanceClient.Decimals(ctx) + balancePrecision, err := s.balanceClient.Decimals() if err != nil { return fmt.Errorf("can't read balance contract precision: %w", err) } @@ -597,7 +597,7 @@ func (s *Server) 
initConfigFromBlockchain(ctx context.Context) error { s.precision.SetBalancePrecision(balancePrecision) // get next epoch delta tick - s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx) + s.initialEpochTickDelta, err = s.nextEpochBlockDelta() if err != nil { return err } @@ -613,8 +613,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { return nil } -func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) { - epochBlock, err := s.netmapClient.LastEpochBlock(ctx) +func (s *Server) nextEpochBlockDelta() (uint32, error) { + epochBlock, err := s.netmapClient.LastEpochBlock() if err != nil { return 0, fmt.Errorf("can't read last epoch block: %w", err) } diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go index ae4c85168..a0c3ea751 100644 --- a/pkg/innerring/locode.go +++ b/pkg/innerring/locode.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/viper" ) -func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { +func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) { locodeDB := locodebolt.New(locodebolt.Prm{ Path: cfg.GetString("locode.db.path"), }, @@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { return irlocode.New(irlocode.Prm{ DB: (*locodeBoltDBWrapper)(locodeDB), - }) + }), nil } type locodeBoltEntryWrapper struct { diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go index fb11e9426..9961710ca 100644 --- a/pkg/innerring/netmap.go +++ b/pkg/innerring/netmap.go @@ -1,7 +1,6 @@ package innerring import ( - "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -18,8 +17,8 @@ type networkSettings netmapclient.Client // MaintenanceModeAllowed requests network configuration from the Sidechain // and check allowance of storage node's maintenance mode according to it. // Always returns state.ErrMaintenanceModeDisallowed. 
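For readers following the Sidechain settings logic: below is a minimal, self-contained sketch of the allowance check that the comment above describes. The names fixedSettings, errMaintenanceDisallowed, and checkMaintenanceAllowed are placeholders for illustration, not the real netmapclient API.

package main

import (
	"errors"
	"fmt"
)

var errMaintenanceDisallowed = errors.New("maintenance mode is disallowed by network settings")

// fixedSettings is a stand-in for the netmap contract client.
type fixedSettings struct{ allowed bool }

func (f fixedSettings) MaintenanceModeAllowed() (bool, error) { return f.allowed, nil }

func checkMaintenanceAllowed(src interface{ MaintenanceModeAllowed() (bool, error) }) error {
	allowed, err := src.MaintenanceModeAllowed()
	if err != nil {
		return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
	}
	if !allowed {
		return errMaintenanceDisallowed
	}
	return nil
}

func main() {
	fmt.Println(checkMaintenanceAllowed(fixedSettings{allowed: false})) // sentinel error
	fmt.Println(checkMaintenanceAllowed(fixedSettings{allowed: true}))  // <nil>
}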
-func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error { - allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx) +func (s *networkSettings) MaintenanceModeAllowed() error { + allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed() if err != nil { return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err) } else if allowed { diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index 1da3c401d..ac3e2a14d 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -279,6 +279,6 @@ type testNetmapClient struct { netmap *netmap.NetMap } -func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index d3d0f83f2..8e11d2d61 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -44,7 +44,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool { return true } - networkMap, err := ap.netmapClient.NetMap(ctx) + networkMap, err := ap.netmapClient.NetMap() if err != nil { ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.Error(err)) diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 0aea74003..2c4654e7c 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -36,7 +36,7 @@ type ( } netmapClient interface { - NetMap(ctx context.Context) (*netmap.NetMap, error) + NetMap() (*netmap.NetMap, error) } morphClient interface { diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go index 5334b9a1f..ba12ebb37 100644 --- a/pkg/innerring/processors/container/common.go +++ b/pkg/innerring/processors/container/common.go @@ -1,7 +1,6 @@ package container import ( - "context" "crypto/ecdsa" "errors" "fmt" @@ -46,7 +45,7 @@ type signatureVerificationData struct { // - v.binPublicKey is a public session key // - session context corresponds to the container and verb in v // - session is "alive" -func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error { +func (cp *Processor) verifySignature(v signatureVerificationData) error { var err error var key frostfsecdsa.PublicKeyRFC6979 keyProvided := v.binPublicKey != nil @@ -59,7 +58,7 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio } if len(v.binTokenSession) > 0 { - return cp.verifyByTokenSession(ctx, v, &key, keyProvided) + return cp.verifyByTokenSession(v, &key, keyProvided) } if keyProvided { @@ -78,8 +77,8 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio return errors.New("signature is invalid or calculated with the key not bound to the container owner") } -func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error { - curEpoch, err := cp.netState.Epoch(ctx) +func (cp *Processor) checkTokenLifetime(token session.Container) error { + curEpoch, err := cp.netState.Epoch() if err != nil { return fmt.Errorf("could not read current epoch: %w", err) } @@ -91,7 +90,7 @@ func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Conta return 
nil } -func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { +func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { var tok session.Container err := tok.Unmarshal(v.binTokenSession) @@ -119,7 +118,7 @@ func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerifi return errors.New("owner differs with token owner") } - err = cp.checkTokenLifetime(ctx, tok) + err = cp.checkTokenLifetime(tok) if err != nil { return fmt.Errorf("check session lifetime: %w", err) } diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index 1b3842eb0..f28e5372a 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -170,11 +170,11 @@ type testNetworkState struct { epoch uint64 } -func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) { +func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) { return s.homHashDisabled, nil } -func (s *testNetworkState) Epoch(context.Context) (uint64, error) { +func (s *testNetworkState) Epoch() (uint64, error) { return s.epoch, nil } @@ -187,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { +func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) { key := hex.EncodeToString(cid) if cont, found := c.get[key]; found { return cont, nil @@ -237,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction) type testFrostFSIDClient struct{} -func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { return &frostfsidclient.Subject{}, nil } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 8e4ab2623..ffaea653a 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -47,7 +47,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool e: put, } - err := cp.checkPutContainer(ctx, pctx) + err := cp.checkPutContainer(pctx) if err != nil { cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, zap.Error(err), @@ -66,8 +66,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool return true } -func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error { - binCnr := pctx.e.Container() +func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { + binCnr := ctx.e.Container() var cnr containerSDK.Container err := cnr.Unmarshal(binCnr) @@ -75,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo return fmt.Errorf("invalid binary container: %w", err) } - err = cp.verifySignature(ctx, signatureVerificationData{ + err = cp.verifySignature(signatureVerificationData{ ownerContainer: cnr.Owner(), verb: session.VerbContainerPut, - binTokenSession: pctx.e.SessionToken(), - binPublicKey: pctx.e.PublicKey(), - signature: pctx.e.Signature(), + binTokenSession: ctx.e.SessionToken(), + 
binPublicKey: ctx.e.PublicKey(), + signature: ctx.e.Signature(), signedData: binCnr, }) if err != nil { @@ -88,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo } // check homomorphic hashing setting - err = checkHomomorphicHashing(ctx, cp.netState, cnr) + err = checkHomomorphicHashing(cp.netState, cnr) if err != nil { return fmt.Errorf("incorrect homomorphic hashing setting: %w", err) } // check native name and zone - err = cp.checkNNS(ctx, pctx, cnr) + err = cp.checkNNS(ctx, cnr) if err != nil { return fmt.Errorf("NNS: %w", err) } @@ -110,7 +110,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven return true } - err := cp.checkDeleteContainer(ctx, e) + err := cp.checkDeleteContainer(e) if err != nil { cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, zap.Error(err), @@ -130,7 +130,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven return true } -func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error { +func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { binCnr := e.ContainerID() var idCnr cid.ID @@ -141,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent. } // receive owner of the related container - cnr, err := cp.cnrClient.Get(ctx, binCnr) + cnr, err := cp.cnrClient.Get(binCnr) if err != nil { return fmt.Errorf("could not receive the container: %w", err) } - err = cp.verifySignature(ctx, signatureVerificationData{ + err = cp.verifySignature(signatureVerificationData{ ownerContainer: cnr.Value.Owner(), verb: session.VerbContainerDelete, idContainerSet: true, @@ -163,21 +163,21 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent. 
return nil } -func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error { +func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error { // fetch domain info - pctx.d = containerSDK.ReadDomain(cnr) + ctx.d = containerSDK.ReadDomain(cnr) // if PutNamed event => check if values in container correspond to args - if named, ok := pctx.e.(interface { + if named, ok := ctx.e.(interface { Name() string Zone() string }); ok { - if name := named.Name(); name != pctx.d.Name() { - return fmt.Errorf("names differ %s/%s", name, pctx.d.Name()) + if name := named.Name(); name != ctx.d.Name() { + return fmt.Errorf("names differ %s/%s", name, ctx.d.Name()) } - if zone := named.Zone(); zone != pctx.d.Zone() { - return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone()) + if zone := named.Zone(); zone != ctx.d.Zone() { + return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone()) } } @@ -186,12 +186,12 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn return fmt.Errorf("could not get container owner address: %w", err) } - subject, err := cp.frostFSIDClient.GetSubject(ctx, addr) + subject, err := cp.frostFSIDClient.GetSubject(addr) if err != nil { return fmt.Errorf("could not get subject from FrostfsID contract: %w", err) } - namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns") + namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns") if !hasNamespace { return nil } @@ -203,13 +203,13 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn return nil } -func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error { - netSetting, err := ns.HomomorphicHashDisabled(ctx) +func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error { + netSetting, err := ns.HomomorphicHashDisabled() if err != nil { return fmt.Errorf("could not get setting in contract: %w", err) } - if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting { + if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting { return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting) } diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 9be93baa4..a0b7491e1 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -25,7 +25,7 @@ type ( ContClient interface { ContractAddress() util.Uint160 - Get(ctx context.Context, cid []byte) (*containercore.Container, error) + Get(cid []byte) (*containercore.Container, error) } MorphClient interface { @@ -33,7 +33,7 @@ type ( } FrostFSIDClient interface { - GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) + GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) } // Processor of events produced by container contract in the sidechain. @@ -68,7 +68,7 @@ type NetworkState interface { // // Must return any error encountered // which did not allow reading the value. - Epoch(ctx context.Context) (uint64, error) + Epoch() (uint64, error) // HomomorphicHashDisabled must return boolean that // represents homomorphic network state: @@ -76,7 +76,7 @@ type NetworkState interface { // * false if hashing is enabled. // // which did not allow reading the value. 
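As an aside, the semantic fix earlier in this hunk (netSetting != cnrSetting instead of netSetting && !cnrSetting) rejects a mismatch in either direction. A minimal sketch of that consistency rule, with networkStateSketch standing in for the real NetworkState:

package main

import "fmt"

type networkStateSketch struct{ homHashDisabled bool }

func (s networkStateSketch) HomomorphicHashDisabled() (bool, error) { return s.homHashDisabled, nil }

func checkHomomorphicHashing(ns networkStateSketch, cnrDisabled bool) error {
	netSetting, err := ns.HomomorphicHashDisabled()
	if err != nil {
		return fmt.Errorf("could not get setting in contract: %w", err)
	}
	if netSetting != cnrDisabled { // mismatch in either direction is an error
		return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrDisabled)
	}
	return nil
}

func main() {
	fmt.Println(checkHomomorphicHashing(networkStateSketch{homHashDisabled: true}, false))  // rejected
	fmt.Println(checkHomomorphicHashing(networkStateSketch{homHashDisabled: false}, false)) // <nil>
}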
- HomomorphicHashDisabled(ctx context.Context) (bool, error) + HomomorphicHashDisabled() (bool, error) } // New creates a container contract processor instance. diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 864c5da67..5a6126249 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -236,7 +236,7 @@ type testIRFetcher struct { publicKeys keys.PublicKeys } -func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { return f.publicKeys, nil } @@ -266,7 +266,7 @@ type testMainnetClient struct { designateHash util.Uint160 } -func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) { +func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) { return c.alphabetKeys, nil } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 6e22abb3c..245679656 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -25,7 +25,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 return true } - mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx) + mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, zap.Error(err)) @@ -95,7 +95,7 @@ func prettyKeys(keys keys.PublicKeys) string { } func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { - innerRing, err := gp.irFetcher.InnerRingKeys(ctx) + innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, zap.Error(err)) diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 2d131edda..7859ebee1 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -52,7 +52,7 @@ type ( // Implementation must take into account availability of // the notary contract. 
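A tiny sketch of the fetcher-interface pattern used here, where production code depends on a one-method interface and tests substitute a canned double; the []string key type is a stand-in for keys.PublicKeys:

package main

import "fmt"

// irFetcherSketch mirrors the one-method shape of IRFetcher.
type irFetcherSketch interface {
	InnerRingKeys() ([]string, error)
}

// cannedFetcher is the kind of double the tests in this patch use.
type cannedFetcher struct{ keys []string }

func (f cannedFetcher) InnerRingKeys() ([]string, error) { return f.keys, nil }

func main() {
	var f irFetcherSketch = cannedFetcher{keys: []string{"key-a", "key-b"}}
	keys, err := f.InnerRingKeys()
	fmt.Println(keys, err)
}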
IRFetcher interface { - InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) + InnerRingKeys() (keys.PublicKeys, error) } FrostFSClient interface { @@ -64,7 +64,7 @@ type ( } MainnetClient interface { - NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) + NeoFSAlphabetList() (res keys.PublicKeys, err error) GetDesignateHash() util.Uint160 } diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 934c3790d..5a5adfb2d 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -294,7 +294,7 @@ type testNodeStateSettings struct { maintAllowed bool } -func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error { +func (s *testNodeStateSettings) MaintenanceModeAllowed() error { if s.maintAllowed { return nil } @@ -303,7 +303,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error { type testValidator struct{} -func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error { +func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error { return nil } @@ -381,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) { +func (c *testNetmapClient) EpochDuration() (uint64, error) { return c.epochDuration, nil } @@ -392,7 +392,7 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) { return 0, fmt.Errorf("not found") } -func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go index b81dc9989..5e0558344 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go @@ -1,7 +1,6 @@ package locode import ( - "context" "errors" "fmt" @@ -30,7 +29,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record // - Continent: R.Continent().String(). // // UN-LOCODE attribute remains untouched. 
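A minimal sketch of the LOCODE enrichment flow this comment describes, using a map-backed database and string attributes as placeholders for the real locode DB wrapper and netmap.NodeInfo:

package main

import (
	"errors"
	"fmt"
)

type locodeRecord struct{ CountryCode, Continent string }

type nodeSketch map[string]string // attribute name -> value

func verifyAndUpdate(n nodeSketch, db map[string]locodeRecord) error {
	code, ok := n["UN-LOCODE"]
	if !ok || code == "" {
		return nil // nothing to verify
	}
	rec, found := db[code]
	if !found {
		return errors.New("could not get locode record from DB")
	}
	// The UN-LOCODE attribute itself remains untouched; derived attributes are filled in.
	n["CountryCode"] = rec.CountryCode
	n["Continent"] = rec.Continent
	return nil
}

func main() {
	db := map[string]locodeRecord{"RU SPB": {CountryCode: "RU", Continent: "Europe"}}
	n := nodeSketch{"UN-LOCODE": "RU SPB"}
	fmt.Println(verifyAndUpdate(n, db), n)
}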
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { attrLocode := n.LOCODE() if attrLocode == "" { return nil diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go index fa2dd1ac1..8ab174dfd 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go @@ -1,7 +1,6 @@ package locode_test import ( - "context" "errors" "fmt" "testing" @@ -93,7 +92,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { t.Run("w/o locode", func(t *testing.T) { n := nodeInfoWithSomeAttrs() - err := validator.VerifyAndUpdate(context.Background(), n) + err := validator.VerifyAndUpdate(n) require.NoError(t, err) }) @@ -103,7 +102,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttrValue(n, "WRONG LOCODE") - err := validator.VerifyAndUpdate(context.Background(), n) + err := validator.VerifyAndUpdate(n) require.Error(t, err) }) @@ -112,7 +111,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"}) - err := validator.VerifyAndUpdate(context.Background(), n) + err := validator.VerifyAndUpdate(n) require.Error(t, err) }) @@ -120,7 +119,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, r.LOCODE) - err := validator.VerifyAndUpdate(context.Background(), n) + err := validator.VerifyAndUpdate(n) require.NoError(t, err) require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode")) diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go index 0e4628ac7..126f36582 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go @@ -1,7 +1,6 @@ package maddress import ( - "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -9,7 +8,7 @@ import ( ) // VerifyAndUpdate calls network.VerifyAddress. -func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { err := network.VerifyMultiAddress(*n) if err != nil { return fmt.Errorf("could not verify multiaddress: %w", err) diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go index 03c41a451..e5165f618 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go @@ -7,7 +7,6 @@ map candidates. package state import ( - "context" "errors" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -24,7 +23,7 @@ type NetworkSettings interface { // no error if allowed; // ErrMaintenanceModeDisallowed if disallowed; // other error if there are any problems with the check. - MaintenanceModeAllowed(ctx context.Context) error + MaintenanceModeAllowed() error } // NetMapCandidateValidator represents tool which checks state of nodes which @@ -56,13 +55,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting // MUST NOT be called before SetNetworkSettings. // // See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods. 
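A condensed, runnable version of the status rule implemented just below: ONLINE passes, MAINTENANCE defers to network settings, anything else is rejected. statusSketch and settingsSketch are placeholders for the SDK types:

package main

import (
	"errors"
	"fmt"
)

type statusSketch int

const (
	statusOnline statusSketch = iota
	statusMaintenance
	statusOffline
)

type settingsSketch struct{ maintenanceAllowed bool }

func (s settingsSketch) MaintenanceModeAllowed() error {
	if s.maintenanceAllowed {
		return nil
	}
	return errors.New("maintenance mode is disallowed")
}

func verifyStatus(st statusSketch, netSettings settingsSketch) error {
	if st == statusOnline {
		return nil
	}
	if st == statusMaintenance {
		return netSettings.MaintenanceModeAllowed()
	}
	return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE")
}

func main() {
	fmt.Println(verifyStatus(statusMaintenance, settingsSketch{maintenanceAllowed: false}))
}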
-func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error { +func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error { if node.Status().IsOnline() { return nil } if node.Status().IsMaintenance() { - return x.netSettings.MaintenanceModeAllowed(ctx) + return x.netSettings.MaintenanceModeAllowed() } return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE") diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go index cbf48a710..b81d7243b 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go @@ -1,7 +1,6 @@ package state_test import ( - "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -14,7 +13,7 @@ type testNetworkSettings struct { disallowed bool } -func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error { +func (x testNetworkSettings) MaintenanceModeAllowed() error { if x.disallowed { return state.ErrMaintenanceModeDisallowed } @@ -82,7 +81,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { testCase.validatorPreparer(&v) } - err := v.VerifyAndUpdate(context.Background(), &node) + err := v.VerifyAndUpdate(&node) if testCase.valid { require.NoError(t, err, testCase.name) diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go index 3dbe98a8d..e9b24e024 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go @@ -1,8 +1,6 @@ package nodevalidation import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -28,9 +26,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator { // VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators. // // If error appears, returns it immediately. 
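The fail-fast composition is short enough to sketch in full; nodeInfoSketch and validatorSketch below are placeholders for the netmap types:

package main

import (
	"errors"
	"fmt"
)

type nodeInfoSketch struct{ locode string }

type validatorSketch func(*nodeInfoSketch) error

func verifyAll(n *nodeInfoSketch, validators ...validatorSketch) error {
	for _, v := range validators {
		if err := v(n); err != nil {
			return err // first failure wins
		}
	}
	return nil
}

func main() {
	nonEmptyLocode := func(n *nodeInfoSketch) error {
		if n.locode == "" {
			return errors.New("empty LOCODE")
		}
		return nil
	}
	fmt.Println(verifyAll(&nodeInfoSketch{}, nonEmptyLocode))
}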
-func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error { +func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error { for _, v := range c.validators { - if err := v.VerifyAndUpdate(ctx, ni); err != nil { + if err := v.VerifyAndUpdate(ni); err != nil { return err } } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 7c78d24a5..93e00bbaa 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -14,7 +14,7 @@ import ( func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool { epoch := ev.EpochNumber() - epochDuration, err := np.netmapClient.EpochDuration(ctx) + epochDuration, err := np.netmapClient.EpochDuration() if err != nil { np.log.Warn(ctx, logs.NetmapCantGetEpochDuration, zap.Error(err)) @@ -37,7 +37,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc } // get new netmap snapshot - networkMap, err := np.netmapClient.NetMap(ctx) + networkMap, err := np.netmapClient.NetMap() if err != nil { np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup, zap.Error(err)) diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index b5c727cc7..5b565ffd1 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -39,7 +39,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) } // validate and update node info - err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo) + err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.Error(err), @@ -108,7 +108,7 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat var err error if ev.Maintenance() { - err = np.nodeStateSettings.MaintenanceModeAllowed(ctx) + err = np.nodeStateSettings.MaintenanceModeAllowed() if err != nil { np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index 277bca1c3..36df57afe 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -49,15 +49,15 @@ type ( // // If no error occurs, the parameter must point to the // ready-made NodeInfo structure. 
- VerifyAndUpdate(context.Context, *netmap.NodeInfo) error + VerifyAndUpdate(*netmap.NodeInfo) error } Client interface { MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error ContractAddress() util.Uint160 - EpochDuration(ctx context.Context) (uint64, error) + EpochDuration() (uint64, error) MorphTxHeight(h util.Uint256) (res uint32, err error) - NetMap(ctx context.Context) (*netmap.NetMap, error) + NetMap() (*netmap.NetMap, error) NewEpoch(ctx context.Context, epoch uint64) error MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go index 310f12248..9cd71ae48 100644 --- a/pkg/innerring/processors/netmap/wrappers.go +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -34,16 +34,16 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 { return w.netmapClient.ContractAddress() } -func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) { - return w.netmapClient.EpochDuration(ctx) +func (w *netmapClientWrapper) EpochDuration() (uint64, error) { + return w.netmapClient.EpochDuration() } func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) { return w.netmapClient.Morph().TxHeight(h) } -func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) { - return w.netmapClient.NetMap(ctx) +func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { + return w.netmapClient.NetMap() } func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 0ef771359..3e9880e70 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -60,7 +60,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool { // InnerRingIndex is a getter for a global index of node in inner ring list. Negative // index means that node is not in the inner ring list. func (s *Server) InnerRingIndex(ctx context.Context) int { - index, err := s.statusIndex.InnerRingIndex(ctx) + index, err := s.statusIndex.InnerRingIndex() if err != nil { s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err)) return -1 @@ -72,7 +72,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int { // InnerRingSize is a getter for a global size of inner ring list. This value // paired with inner ring index. func (s *Server) InnerRingSize(ctx context.Context) int { - size, err := s.statusIndex.InnerRingSize(ctx) + size, err := s.statusIndex.InnerRingSize() if err != nil { s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err)) return 0 @@ -84,7 +84,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int { // AlphabetIndex is a getter for a global index of node in alphabet list. // Negative index means that node is not in the alphabet list. 
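These getters share one convention: errors are logged and collapsed into a sentinel value (-1 means "not in the list"), so callers never branch on an error. A minimal sketch, with indexerSketch standing in for statusIndex:

package main

import (
	"errors"
	"fmt"
	"log"
)

type indexerSketch struct {
	idx int32
	err error
}

func (i indexerSketch) AlphabetIndex() (int32, error) { return i.idx, i.err }

func alphabetIndex(i indexerSketch) int {
	index, err := i.AlphabetIndex()
	if err != nil {
		log.Printf("can't get alphabet index: %v", err)
		return -1 // sentinel: node is not in the alphabet list
	}
	return int(index)
}

func main() {
	fmt.Println(alphabetIndex(indexerSketch{err: errors.New("boom")})) // -1
}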
func (s *Server) AlphabetIndex(ctx context.Context) int { - index, err := s.statusIndex.AlphabetIndex(ctx) + index, err := s.statusIndex.AlphabetIndex() if err != nil { s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err)) return -1 diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index a6c40f9fa..08ef8b86c 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // WithLogger returns an option to specify Blobovnicza's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "Blobovnicza")) } } diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index 8f24b5675..d821b2991 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -6,6 +6,7 @@ import ( "syscall" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -93,6 +94,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(dataSize)), zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) b.itemDeleted(recordSize) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index 3e8b9f07b..d9e99d0d1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -158,11 +158,11 @@ func (b *Blobovniczas) Path() string { } // SetCompressor implements common.Storage. 
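A small sketch of the setter-injection pattern behind SetCompressor/Compressor: the blobstor hands a single shared compression object to each substorage, which keeps the pointer rather than a copy. All names here are placeholders, not the real compression package:

package main

import "fmt"

type compressorSketch struct{ enabled bool }

type storageSketch struct{ compression *compressorSketch }

func (s *storageSketch) SetCompressor(cc *compressorSketch) { s.compression = cc }
func (s *storageSketch) Compressor() *compressorSketch      { return s.compression }

func main() {
	shared := &compressorSketch{enabled: true}
	a, b := &storageSketch{}, &storageSketch{}
	a.SetCompressor(shared)
	b.SetCompressor(shared)
	fmt.Println(a.Compressor() == b.Compressor()) // true: one shared instance
}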
-func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { +func (b *Blobovniczas) SetCompressor(cc *compression.Config) { b.compression = cc } -func (b *Blobovniczas) Compressor() *compression.Compressor { +func (b *Blobovniczas) Compressor() *compression.Config { return b.compression } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index f87f4a144..ec9743b57 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -19,8 +19,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { st := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(10), WithBlobovniczaShallowDepth(1), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index a6c1ce368..c77df63bf 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -41,34 +41,35 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - if b.blzInitWorkerCount > 0 { - eg.SetLimit(b.blzInitWorkerCount + 1) - } - eg.Go(func() error { - return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { - eg.Go(func() error { - p = strings.TrimSuffix(p, rebuildSuffix) - shBlz := b.getBlobovniczaWithoutCaching(p) - blz, err := shBlz.Open(egCtx) - if err != nil { - return err - } - defer shBlz.Close(egCtx) + eg.SetLimit(b.blzInitWorkerCount) + err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { + eg.Go(func() error { + p = strings.TrimSuffix(p, rebuildSuffix) + shBlz := b.getBlobovniczaWithoutCaching(p) + blz, err := shBlz.Open(egCtx) + if err != nil { + return err + } + defer shBlz.Close(egCtx) - moveInfo, err := blz.ListMoveInfo(egCtx) - if err != nil { - return err - } - for _, move := range moveInfo { - b.deleteProtectedObjects.Add(move.Address) - } + moveInfo, err := blz.ListMoveInfo(egCtx) + if err != nil { + return err + } + for _, move := range moveInfo { + b.deleteProtectedObjects.Add(move.Address) + } - b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) - return nil - }) - return false, nil + b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) + return nil }) + return false, nil }) + if err != nil { + _ = eg.Wait() + return err + } + return eg.Wait() } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go index 7db1891f9..b26323bd0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go @@ -2,9 +2,6 @@ package blobovniczatree import ( "context" - "os" - "path" - "strconv" "testing" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -132,34 +129,3 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, blz.Close(context.Background())) } - -func TestInitBlobovniczasInitErrorType(t *testing.T) { - t.Parallel() - - rootDir := t.TempDir() - - for idx := 0; idx < 
10; idx++ { - f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db")) - require.NoError(t, err) - _, err = f.Write([]byte("invalid db")) - require.NoError(t, err) - require.NoError(t, f.Close()) - - f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix)) - require.NoError(t, err) - require.NoError(t, f.Close()) - } - - blz := NewBlobovniczaTree( - context.Background(), - WithBlobovniczaShallowDepth(1), - WithBlobovniczaShallowWidth(1), - WithRootPath(rootDir), - ) - - require.NoError(t, blz.Open(mode.ComponentReadWrite)) - err := blz.Init() - require.Contains(t, err.Error(), "open blobovnicza") - require.Contains(t, err.Error(), "invalid database") - require.NoError(t, blz.Close(context.Background())) -} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index d096791c3..47e12bafb 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -18,10 +19,7 @@ import ( "go.uber.org/zap" ) -var ( - errObjectIsDeleteProtected = errors.New("object is delete protected") - deleteRes = common.DeleteRes{} -) +var errObjectIsDeleteProtected = errors.New("object is delete protected") // Delete deletes object from blobovnicza tree. // @@ -45,17 +43,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co defer span.End() if b.readOnly { - return deleteRes, common.ErrReadOnly + return common.DeleteRes{}, common.ErrReadOnly } if b.rebuildGuard.TryRLock() { defer b.rebuildGuard.RUnlock() } else { - return deleteRes, errRebuildInProgress + return common.DeleteRes{}, errRebuildInProgress } if b.deleteProtectedObjects.Contains(prm.Address) { - return deleteRes, errObjectIsDeleteProtected + return common.DeleteRes{}, errObjectIsDeleteProtected } var bPrm blobovnicza.DeletePrm @@ -85,6 +83,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } } @@ -99,7 +98,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if err == nil && !objectFound { // not found in any blobovnicza - return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound)) + return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) } success = err == nil @@ -113,7 +112,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz shBlz := b.getBlobovnicza(ctx, blzPath) blz, err := shBlz.Open(ctx) if err != nil { - return deleteRes, err + return common.DeleteRes{}, err } defer shBlz.Close(ctx) @@ -123,5 +122,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz // removes object from blobovnicza and returns common.DeleteRes. 
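The Delete path above guards against concurrent rebuilds with a non-blocking read lock. A minimal sketch of that pattern, where errRebuildInProgress mirrors the sentinel used in this package and treeSketch is a placeholder:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed")

type treeSketch struct{ rebuildGuard sync.RWMutex }

func (t *treeSketch) delete() error {
	if !t.rebuildGuard.TryRLock() {
		return errRebuildInProgress // a rebuild holds the write lock
	}
	defer t.rebuildGuard.RUnlock()
	// ... perform the actual deletion here ...
	return nil
}

func main() {
	t := &treeSketch{}
	t.rebuildGuard.Lock() // simulate an in-flight rebuild
	fmt.Println(t.delete())
	t.rebuildGuard.Unlock()
	fmt.Println(t.delete())
}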
func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) { _, err := blz.Delete(ctx, prm) - return deleteRes, err + return common.DeleteRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 0c5e48821..d2c99945f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "go.opentelemetry.io/otel/attribute" @@ -56,7 +57,8 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index df2b4ffe5..5414140f0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go @@ -19,8 +19,7 @@ func TestExistsInvalidStorageID(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go index 9244d765c..d390ecf1d 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go @@ -15,8 +15,7 @@ func TestGeneric(t *testing.T) { helper := func(t *testing.T, dir string) common.Storage { return NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -44,8 +43,7 @@ func TestControl(t *testing.T) { newTree := func(t *testing.T) common.Storage { return NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index e5c83e5f2..5d158644e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -69,6 +70,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index 27d13f4f3..84b9bc55f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -11,6 +11,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -70,7 +71,8 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if !outOfBounds && !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } if outOfBounds { return true, err diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index ceb8fb7e3..5c2d58ca1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -249,12 +249,6 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres } func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) { - select { - case <-ctx.Done(): - return false, ctx.Err() - default: - } - sysPath := filepath.Join(b.rootPath, path) entries, err := os.ReadDir(sysPath) if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 6438f715b..f2f9509ad 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string { return b.path } -// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. -type levelDBManager struct { +// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. 
+type levelDbManager struct { dbMtx *sync.RWMutex databases map[uint64]*sharedDB @@ -157,8 +157,8 @@ type levelDBManager struct { func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string, readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *levelDBManager { - result := &levelDBManager{ +) *levelDbManager { + result := &levelDbManager{ databases: make(map[uint64]*sharedDB), dbMtx: &sync.RWMutex{}, @@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st return result } -func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { +func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { res := m.getDBIfExists(idx) if res != nil { return res @@ -181,14 +181,14 @@ func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { return m.getOrCreateDB(idx) } -func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB { +func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB { m.dbMtx.RLock() defer m.dbMtx.RUnlock() return m.databases[idx] } -func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { +func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { m.dbMtx.Lock() defer m.dbMtx.Unlock() @@ -202,7 +202,7 @@ func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { return db } -func (m *levelDBManager) hasAnyDB() bool { +func (m *levelDbManager) hasAnyDB() bool { m.dbMtx.RLock() defer m.dbMtx.RUnlock() @@ -213,7 +213,7 @@ func (m *levelDBManager) hasAnyDB() bool { // // The blobovnicza opens at the first request, closes after the last request. type dbManager struct { - levelToManager map[string]*levelDBManager + levelToManager map[string]*levelDbManager levelToManagerGuard *sync.RWMutex closedFlag *atomic.Bool dbCounter *openDBCounter @@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, options: options, readOnly: readOnly, metrics: metrics, - levelToManager: make(map[string]*levelDBManager), + levelToManager: make(map[string]*levelDbManager), levelToManagerGuard: &sync.RWMutex{}, log: log, closedFlag: &atomic.Bool{}, @@ -266,7 +266,7 @@ func (m *dbManager) Close() { m.dbCounter.WaitUntilAllClosed() } -func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { +func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { result := m.getLevelManagerIfExists(lvlPath) if result != nil { return result @@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { return m.getOrCreateLevelManager(lvlPath) } -func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager { +func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager { m.levelToManagerGuard.RLock() defer m.levelToManagerGuard.RUnlock() return m.levelToManager[lvlPath] } -func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager { +func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager { m.levelToManagerGuard.Lock() defer m.levelToManagerGuard.Unlock() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 5f268b0f2..0e1b2022e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -19,7 +19,7 @@ type cfg struct { openedCacheSize int blzShallowDepth uint64 blzShallowWidth uint64 - compression *compression.Compressor + compression 
*compression.Config blzOpts []blobovnicza.Option reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. metrics Metrics @@ -63,15 +63,10 @@ func initConfig(c *cfg) { } } -func WithBlobovniczaTreeLogger(log *logger.Logger) Option { +func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = log - } -} - -func WithBlobovniczaLogger(log *logger.Logger) Option { - return func(c *cfg) { - c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log)) + c.log = l + c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 37c49d741..8276a25ef 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -82,14 +83,16 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return false, nil } if active == nil { - i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) + i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return false, nil } defer active.Close(ctx) @@ -103,7 +106,8 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } if errors.Is(err, blobovnicza.ErrNoSpace) { i.AllFull = true diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index a840275b8..16ef2b180 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -50,7 +50,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm var res common.RebuildRes b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild) - completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter) + completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage) res.ObjectsMoved += completedPreviosMoves if err != nil { b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) @@ -79,7 +79,7 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common. 
var completedDBCount uint32 for _, db := range dbs { b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) - movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter) + movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter) res.ObjectsMoved += movedObjects if err != nil { b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) @@ -195,7 +195,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil } -func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) { +func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { shDB := b.getBlobovnicza(ctx, path) blz, err := shDB.Open(ctx) if err != nil { @@ -212,7 +212,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M if err != nil { return 0, err } - migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter) + migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter) if err != nil { return migratedObjects, err } @@ -226,7 +226,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { sysPath := filepath.Join(b.rootPath, path) - sysPath += rebuildSuffix + sysPath = sysPath + rebuildSuffix _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) if err != nil { return nil, err @@ -238,7 +238,7 @@ func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (fun }, nil } -func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) { +func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { var result atomic.Uint64 batch := make(map[oid.Address][]byte) @@ -253,12 +253,7 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn }) for { - release, err := limiter.ReadRequest(ctx) - if err != nil { - return result.Load(), err - } - _, err = blz.Iterate(ctx, prm) - release() + _, err := blz.Iterate(ctx, prm) if err != nil && !errors.Is(err, errBatchFull) { return result.Load(), err } @@ -270,19 +265,13 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn eg, egCtx := errgroup.WithContext(ctx) for addr, data := range batch { - release, err := limiter.AcquireWorkSlot(egCtx) - if err != nil { + if err := limiter.AcquireWorkSlot(egCtx); err != nil { _ = eg.Wait() return result.Load(), err } eg.Go(func() error { - defer release() - moveRelease, err := limiter.WriteRequest(ctx) - if err != nil { - return err - } - err = b.moveObject(egCtx, blz, blzPath, addr, data, meta) - moveRelease() + defer limiter.ReleaseWorkSlot() + err := b.moveObject(egCtx, blz, blzPath, addr, data, meta) if err == nil { result.Add(1) } @@ -328,7 +317,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo return nil } -func (b *Blobovniczas) dropDB(ctx context.Context, path string, 
shDB *sharedDB) (bool, error) { +func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) { select { case <-ctx.Done(): return false, ctx.Err() @@ -341,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) b.dbFilesGuard.Lock() defer b.dbFilesGuard.Unlock() - if err := shDB.CloseAndRemoveFile(ctx); err != nil { + if err := shDb.CloseAndRemoveFile(ctx); err != nil { return false, err } b.commondbManager.CleanResources(path) @@ -370,7 +359,7 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error { return b.dropDirectoryIfEmpty(filepath.Dir(path)) } -func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) { +func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) { var count uint64 var rebuildTempFilesToRemove []string err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) { @@ -383,24 +372,13 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co } defer shDB.Close(ctx) - release, err := rateLimiter.ReadRequest(ctx) - if err != nil { - return false, err - } incompletedMoves, err := blz.ListMoveInfo(ctx) - release() if err != nil { return true, err } for _, move := range incompletedMoves { - release, err := rateLimiter.WriteRequest(ctx) - if err != nil { - return false, err - } - err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore) - release() - if err != nil { + if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil { return true, err } count++ @@ -410,14 +388,9 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co return false, nil }) for _, tmp := range rebuildTempFilesToRemove { - release, err := rateLimiter.WriteRequest(ctx) - if err != nil { - return count, err - } if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil { b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) } - release() } return count, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index 4146ef260..2f58624aa 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -140,8 +140,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) { b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -162,18 +161,16 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object storageIDs: make(map[oid.Address][]byte), guard: &sync.Mutex{}, } - limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 1, + MetaStorage: metaStub, + WorkerLimiter: &rebuildLimiterStub{}, + FillPercent: 1, }) require.NoError(t, err) require.Equal(t, uint64(1), rRes.ObjectsMoved) require.Equal(t, uint64(0), rRes.FilesRemoved) require.NoError(t, b.Close(context.Background())) - 
require.NoError(t, limiter.ValidateReleased()) blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index a7a99fec3..aae72b5ff 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -2,9 +2,7 @@ package blobovniczatree import ( "context" - "fmt" "sync" - "sync/atomic" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -50,8 +48,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -79,11 +76,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } - limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 60, + MetaStorage: metaStub, + WorkerLimiter: &rebuildLimiterStub{}, + FillPercent: 60, }) require.NoError(t, err) dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 @@ -98,7 +94,6 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) }) t.Run("no rebuild single db", func(t *testing.T) { @@ -107,8 +102,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -134,11 +128,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } - limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 90, // 64KB / 100KB = 64% + MetaStorage: metaStub, + WorkerLimiter: &rebuildLimiterStub{}, + FillPercent: 90, // 64KB / 100KB = 64% }) require.NoError(t, err) dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 @@ -153,7 +146,6 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) }) t.Run("rebuild by fill percent", func(t *testing.T) { @@ -162,8 +154,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -202,11 +193,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } - limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - 
MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 80, + MetaStorage: metaStub, + WorkerLimiter: &rebuildLimiterStub{}, + FillPercent: 80, }) require.NoError(t, err) require.Equal(t, uint64(49), rRes.FilesRemoved) @@ -225,7 +215,6 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) }) t.Run("rebuild by overflow", func(t *testing.T) { @@ -234,8 +223,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -266,8 +254,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), WithBlobovniczaShallowDepth(1), @@ -279,11 +266,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Open(mode.ComponentReadWrite)) require.NoError(t, b.Init()) - limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - Limiter: limiter, - FillPercent: 80, + MetaStorage: metaStub, + WorkerLimiter: &rebuildLimiterStub{}, + FillPercent: 80, }) require.NoError(t, err) require.Equal(t, uint64(49), rRes.FilesRemoved) @@ -299,7 +285,6 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) }) } @@ -309,8 +294,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), // 64KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(2), // depth = 2 @@ -338,8 +322,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { b = NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(32*1024), // 32KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(3), // depth = 3 @@ -355,10 +338,9 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } - limiter := &rebuildLimiterStub{} var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.Limiter = limiter + rPrm.WorkerLimiter = &rebuildLimiterStub{} rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) @@ -374,15 +356,13 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { } require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) } func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + 
WithLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(sourceWidth), WithBlobovniczaShallowDepth(sourceDepth), @@ -423,8 +403,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta b = NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(targetWidth), WithBlobovniczaShallowDepth(targetDepth), @@ -448,10 +427,9 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta storageIDs: storageIDs, guard: &sync.Mutex{}, } - limiter := &rebuildLimiterStub{} var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.Limiter = limiter + rPrm.WorkerLimiter = &rebuildLimiterStub{} rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) @@ -467,7 +445,6 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta } require.NoError(t, b.Close(context.Background())) - require.NoError(t, limiter.ValidateReleased()) } type storageIDUpdateStub struct { @@ -485,36 +462,7 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr return nil } -type rebuildLimiterStub struct { - slots atomic.Int64 - readRequests atomic.Int64 - writeRequests atomic.Int64 -} +type rebuildLimiterStub struct{} -func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) { - s.slots.Add(1) - return func() { s.slots.Add(-1) }, nil -} - -func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) { - s.readRequests.Add(1) - return func() { s.readRequests.Add(-1) }, nil -} - -func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) { - s.writeRequests.Add(1) - return func() { s.writeRequests.Add(-1) }, nil -} - -func (s *rebuildLimiterStub) ValidateReleased() error { - if v := s.slots.Load(); v != 0 { - return fmt.Errorf("invalid slots value %d", v) - } - if v := s.readRequests.Load(); v != 0 { - return fmt.Errorf("invalid read requests value %d", v) - } - if v := s.writeRequests.Load(); v != 0 { - return fmt.Errorf("invalid write requests value %d", v) - } - return nil -} +func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil } +func (s *rebuildLimiterStub) ReleaseWorkSlot() {} diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index ceaf2538a..f850f48b4 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -41,7 +41,7 @@ type SubStorageInfo struct { type Option func(*cfg) type cfg struct { - compression compression.Compressor + compression compression.Config log *logger.Logger storage []SubStorage metrics Metrics @@ -91,13 +91,50 @@ func WithStorages(st []SubStorage) Option { // WithLogger returns option to specify BlobStor's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "BlobStor")) } } -func WithCompression(comp compression.Config) Option { +// WithCompressObjects returns option to toggle +// compression of the stored objects. +// +// If true, Zstandard algorithm is used for data compression. +// +// If compressor (decompressor) creation failed, +// the uncompressed option will be used, and the error +// is recorded in the provided log. 
+func WithCompressObjects(comp bool) Option { return func(c *cfg) { - c.compression.Config = comp + c.compression.Enabled = comp + } +} + +// WithCompressibilityEstimate returns an option to use +// normalized compressibility estimate to decide whether +// to compress data or not. +// +// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 +func WithCompressibilityEstimate(v bool) Option { + return func(c *cfg) { + c.compression.UseCompressEstimation = v + } +} + +// WithCompressibilityEstimateThreshold returns an option to set +// normalized compressibility estimate threshold. +// +// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 +func WithCompressibilityEstimateThreshold(threshold float64) Option { + return func(c *cfg) { + c.compression.CompressEstimationThreshold = threshold + } +} + +// WithUncompressableContentTypes returns option to disable compression +// for specific content types as seen by object.AttributeContentType attribute. +func WithUncompressableContentTypes(values []string) Option { + return func(c *cfg) { + c.compression.UncompressableContentTypes = values } } @@ -115,6 +152,6 @@ func WithMetrics(m Metrics) Option { } } -func (b *BlobStor) Compressor() *compression.Compressor { - return &b.compression +func (b *BlobStor) Compressor() *compression.Config { + return &b.cfg.compression } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index 6ddeb6f00..6cc56fa3b 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -52,9 +51,7 @@ func TestCompression(t *testing.T) { newBlobStor := func(t *testing.T, compress bool) *BlobStor { bs := New( - WithCompression(compression.Config{ - Enabled: compress, - }), + WithCompressObjects(compress), WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) require.NoError(t, bs.Init(context.Background())) @@ -116,10 +113,8 @@ func TestBlobstor_needsCompression(t *testing.T) { dir := t.TempDir() bs := New( - WithCompression(compression.Config{ - Enabled: compress, - UncompressableContentTypes: ct, - }), + WithCompressObjects(compress), + WithUncompressableContentTypes(ct), WithStorages([]SubStorage{ { Storage: blobovniczatree.NewBlobovniczaTree( diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go index 788fe66f2..19e181ee7 100644 --- a/pkg/local_object_storage/blobstor/common/rebuild.go +++ b/pkg/local_object_storage/blobstor/common/rebuild.go @@ -12,27 +12,16 @@ type RebuildPrm struct { - MetaStorage MetaStorage - Limiter RebuildLimiter - FillPercent int + MetaStorage MetaStorage + WorkerLimiter ConcurrentWorkersLimiter + FillPercent int } type MetaStorage interface { UpdateStorageID(ctx context.Context, addr
oid.Address, storageID []byte) error } -type ReleaseFunc func() - -type ConcurrencyLimiter interface { - AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) -} - -type RateLimiter interface { - ReadRequest(context.Context) (ReleaseFunc, error) - WriteRequest(context.Context) (ReleaseFunc, error) -} - -type RebuildLimiter interface { - ConcurrencyLimiter - RateLimiter +type ConcurrentWorkersLimiter interface { + AcquireWorkSlot(ctx context.Context) error + ReleaseWorkSlot() } diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index e35c35e60..6ecef48cd 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ b/pkg/local_object_storage/blobstor/common/storage.go @@ -18,8 +18,8 @@ type Storage interface { Path() string ObjectsCount(ctx context.Context) (uint64, error) - SetCompressor(cc *compression.Compressor) - Compressor() *compression.Compressor + SetCompressor(cc *compression.Config) + Compressor() *compression.Config // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go index 445a0494b..9f70f8ec2 100644 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ b/pkg/local_object_storage/blobstor/compression/bench_test.go @@ -11,7 +11,7 @@ import ( ) func BenchmarkCompression(b *testing.B) { - c := Compressor{Config: Config{Enabled: true}} + c := Config{Enabled: true} require.NoError(b, c.Init()) for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} { @@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) { } } -func benchWith(b *testing.B, c Compressor, data []byte) { +func benchWith(b *testing.B, c Config, data []byte) { b.ResetTimer() b.ReportAllocs() for range b.N { @@ -56,10 +56,8 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("estimate", func(b *testing.B) { b.ResetTimer() - c := &Compressor{ - Config: Config{ - Enabled: true, - }, + c := &Config{ + Enabled: true, } require.NoError(b, c.Init()) @@ -78,10 +76,8 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("compress", func(b *testing.B) { b.ResetTimer() - c := &Compressor{ - Config: Config{ - Enabled: true, - }, + c := &Config{ + Enabled: true, } require.NoError(b, c.Init()) diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index c76cec9a1..85ab47692 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -4,36 +4,21 @@ import ( "bytes" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/klauspost/compress" "github.com/klauspost/compress/zstd" ) -type Level string - -const ( - LevelDefault Level = "" - LevelOptimal Level = "optimal" - LevelFastest Level = "fastest" - LevelSmallestSize Level = "smallest_size" -) - -type Compressor struct { - Config - - encoder *zstd.Encoder - decoder *zstd.Decoder -} - // Config represents common compression-related configuration. 
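For context on the simplified contract above (an error-returning AcquireWorkSlot paired with an unconditional ReleaseWorkSlot): such a limiter maps naturally onto a buffered channel used as a semaphore. The sketch below is illustrative only and not part of the patch; the semLimiter name and the fixed capacity are assumptions.

// semLimiter is a hypothetical fixed-capacity implementation of
// common.ConcurrentWorkersLimiter; it could be passed as
// RebuildPrm.WorkerLimiter where the tests use rebuildLimiterStub.
// Assumes: import "context".
type semLimiter struct {
	sem chan struct{}
}

func newSemLimiter(capacity int) *semLimiter {
	return &semLimiter{sem: make(chan struct{}, capacity)}
}

// AcquireWorkSlot blocks until a slot is free or the context is cancelled.
func (l *semLimiter) AcquireWorkSlot(ctx context.Context) error {
	select {
	case l.sem <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// ReleaseWorkSlot returns a previously acquired slot.
func (l *semLimiter) ReleaseWorkSlot() { <-l.sem }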
type Config struct { Enabled bool UncompressableContentTypes []string - Level Level - EstimateCompressibility bool - EstimateCompressibilityThreshold float64 + UseCompressEstimation bool + CompressEstimationThreshold float64 + + encoder *zstd.Encoder + decoder *zstd.Decoder } // zstdFrameMagic contains the first 4 bytes of any compressed object @@ -41,11 +26,11 @@ type Config struct { var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} // Init initializes compression routines. -func (c *Compressor) Init() error { +func (c *Config) Init() error { var err error if c.Enabled { - c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel())) + c.encoder, err = zstd.NewWriter(nil) if err != nil { return err } @@ -88,7 +73,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool { // Decompress decompresses data if it starts with the magic // and returns data untouched otherwise. -func (c *Compressor) Decompress(data []byte) ([]byte, error) { +func (c *Config) Decompress(data []byte) ([]byte, error) { if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) { return data, nil } @@ -97,13 +82,13 @@ func (c *Compressor) Decompress(data []byte) ([]byte, error) { // Compress compresses data if compression is enabled // and returns data untouched otherwise. -func (c *Compressor) Compress(data []byte) []byte { +func (c *Config) Compress(data []byte) []byte { if c == nil || !c.Enabled { return data } - if c.EstimateCompressibility { + if c.UseCompressEstimation { estimated := compress.Estimate(data) - if estimated >= c.EstimateCompressibilityThreshold { + if estimated >= c.CompressEstimationThreshold { return c.compress(data) } return data @@ -111,7 +96,7 @@ func (c *Compressor) Compress(data []byte) []byte { return c.compress(data) } -func (c *Compressor) compress(data []byte) []byte { +func (c *Config) compress(data []byte) []byte { maxSize := c.encoder.MaxEncodedSize(len(data)) compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) if len(data) < len(compressed) { @@ -121,7 +106,7 @@ func (c *Compressor) compress(data []byte) []byte { // Close closes encoder and decoder, returning any error that occurred.
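A rough usage sketch of the Config API shown above, illustrative only (the roundTrip helper is hypothetical):

// Assumes: import ".../pkg/local_object_storage/blobstor/compression".
func roundTrip(data []byte) ([]byte, error) {
	c := &compression.Config{Enabled: true}
	if err := c.Init(); err != nil { // creates the zstd encoder and decoder
		return nil, err
	}
	defer c.Close()

	packed := c.Compress(data) // returns data unchanged if compression does not pay off
	return c.Decompress(packed) // passthrough unless input starts with zstdFrameMagic
}

Because compressed payloads are self-describing via the frame magic, mixed compressed and plain objects can live in the same storage and still decompress correctly.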
-func (c *Compressor) Close() error { +func (c *Config) Close() error { var err error if c.encoder != nil { err = c.encoder.Close() @@ -131,24 +116,3 @@ func (c *Compressor) Close() error { } return err } - -func (c *Config) HasValidCompressionLevel() bool { - return c.Level == LevelDefault || - c.Level == LevelOptimal || - c.Level == LevelFastest || - c.Level == LevelSmallestSize -} - -func (c *Compressor) compressionLevel() zstd.EncoderLevel { - switch c.Level { - case LevelDefault, LevelOptimal: - return zstd.SpeedDefault - case LevelFastest: - return zstd.SpeedFastest - case LevelSmallestSize: - return zstd.SpeedBestCompression - default: - assert.Fail("unknown compression level", string(c.Level)) - return zstd.SpeedDefault - } -} diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 0418eedd0..93316be02 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -6,7 +6,6 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "go.uber.org/zap" ) @@ -54,10 +53,6 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag func (b *BlobStor) Init(ctx context.Context) error { b.log.Debug(ctx, logs.BlobstorInitializing) - if !b.compression.HasValidCompressionLevel() { - b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level))) - b.compression.Level = compression.LevelDefault - } if err := b.compression.Init(); err != nil { return err } diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index c155e15b8..f1e45fe10 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -74,7 +75,8 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi for _, err := range errors[:len(errors)-1] { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return common.ExistsRes{}, errors[len(errors)-1] diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go index 3caee7ee1..b5dbc9e40 100644 --- a/pkg/local_object_storage/blobstor/fstree/counter.go +++ b/pkg/local_object_storage/blobstor/fstree/counter.go @@ -2,8 +2,6 @@ package fstree import ( "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) // FileCounter used to count files in FSTree. The implementation must be thread-safe. 
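To make the FileCounter contract concrete, here is an illustrative pairing of Inc and Dec with SimpleCounter (an assumption: the zero value is ready to use, as the mutex-guarded methods suggest):

// Assumes: import ".../pkg/local_object_storage/blobstor/fstree".
func counterDemo() {
	var c fstree.SimpleCounter

	c.Inc(512)                   // one file of 512 bytes
	count, size := c.CountSize() // count == 1, size == 512
	_, _ = count, size

	c.Dec(512) // back to zero files, zero bytes
	// With the checks restored in the next hunk, a second c.Dec(512)
	// would panic with "fstree.SimpleCounter: invalid count".
}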
@@ -54,11 +52,16 @@ func (c *SimpleCounter) Dec(size uint64) { c.mtx.Lock() defer c.mtx.Unlock() - assert.True(c.count > 0, "fstree.SimpleCounter: invalid count") - c.count-- - - assert.True(c.size >= size, "fstree.SimpleCounter: invalid size") - c.size -= size + if c.count > 0 { + c.count-- + } else { + panic("fstree.SimpleCounter: invalid count") + } + if c.size >= size { + c.size -= size + } else { + panic("fstree.SimpleCounter: invalid size") + } } func (c *SimpleCounter) CountSize() (uint64, uint64) { diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 112741ab4..031b385b2 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -45,7 +45,7 @@ type FSTree struct { log *logger.Logger - compressor *compression.Compressor + *compression.Config Depth uint64 DirNameLen int @@ -82,7 +82,7 @@ func New(opts ...Option) *FSTree { Permissions: 0o700, RootPath: "./", }, - compressor: nil, + Config: nil, Depth: 4, DirNameLen: DirNameLen, metrics: &noopMetrics{}, @@ -196,7 +196,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr } if err == nil { - data, err = t.compressor.Decompress(data) + data, err = t.Decompress(data) } if err != nil { if prm.IgnoreErrors { @@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err return common.PutRes{}, err } if !prm.DontCompress { - prm.RawData = t.compressor.Compress(prm.RawData) + prm.RawData = t.Compress(prm.RawData) } size = len(prm.RawData) @@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err } } - data, err = t.compressor.Decompress(data) + data, err = t.Decompress(data) if err != nil { return common.GetRes{}, err } @@ -597,12 +597,12 @@ func (t *FSTree) Path() string { } // SetCompressor implements common.Storage. -func (t *FSTree) SetCompressor(cc *compression.Compressor) { - t.compressor = cc +func (t *FSTree) SetCompressor(cc *compression.Config) { + t.Config = cc } -func (t *FSTree) Compressor() *compression.Compressor { - return t.compressor +func (t *FSTree) Compressor() *compression.Config { + return t.Config } // SetReportErrorFunc implements common.Storage. 
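With *compression.Config embedded again, FSTree and BlobStor share a single compression state object, and SetCompressor simply stores the pointer. An illustrative wiring sketch (the path and depth values are assumptions, not part of the patch):

// Assumes imports of the fstree and compression packages used above.
func newCompressedTree() (*fstree.FSTree, error) {
	cfg := &compression.Config{Enabled: true}
	if err := cfg.Init(); err != nil {
		return nil, err
	}

	t := fstree.New(
		fstree.WithPath("/srv/frostfs/tree"), // hypothetical root
		fstree.WithDepth(4),
	)
	t.SetCompressor(cfg) // Put now compresses; Get and iterate decompress
	return t, nil
}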
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go index 6d633dad6..4110ba7d7 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go @@ -67,9 +67,12 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error { err := w.writeFile(tmpPath, data) if err != nil { var pe *fs.PathError - if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) { - err = common.ErrNoSpace - _ = os.RemoveAll(tmpPath) + if errors.As(err, &pe) { + switch pe.Err { + case syscall.ENOSPC: + err = common.ErrNoSpace + _ = os.RemoveAll(tmpPath) + } } return err } @@ -133,6 +136,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error { if err := os.Remove(p); err != nil { return err } - w.fileCounter.Dec(size) + w.fileCounter.Dec(uint64(size)) return nil } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go index 49cbda344..3561c616b 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go @@ -69,13 +69,10 @@ func (w *linuxWriter) writeFile(p string, data []byte) error { if err != nil { return err } - written := 0 tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10) n, err := unix.Write(fd, data) - for err == nil { - written += n - - if written == len(data) { + if err == nil { + if n == len(data) { err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW) if err == nil { w.fileCounter.Inc(uint64(len(data))) @@ -83,23 +80,9 @@ func (w *linuxWriter) writeFile(p string, data []byte) error { if errors.Is(err, unix.EEXIST) { err = nil } - break + } else { + err = errors.New("incomplete write") } - - // From man 2 write: - // https://www.man7.org/linux/man-pages/man2/write.2.html - // - // Note that a successful write() may transfer fewer than count - // bytes. Such partial writes can occur for various reasons; for - // example, because there was insufficient space on the disk device - // to write all of the requested bytes, or because a blocked write() - // to a socket, pipe, or similar was interrupted by a signal handler - // after it had transferred some, but before it had transferred all - // of the requested bytes. In the event of a partial write, the - // caller can make another write() call to transfer the remaining - // bytes. The subsequent call will either transfer further bytes or - // may result in an error (e.g., if the disk is now full). 
- n, err = unix.Write(fd, data[written:]) } errClose := unix.Close(fd) if err != nil { @@ -131,7 +114,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error { return logicerr.Wrap(new(apistatus.ObjectNotFound)) } if err == nil { - w.fileCounter.Dec(size) + w.fileCounter.Dec(uint64(size)) } return err } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go deleted file mode 100644 index 7fae2e695..000000000 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build linux && integration - -package fstree - -import ( - "context" - "errors" - "os" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" -) - -func TestENOSPC(t *testing.T) { - dir, err := os.MkdirTemp(t.TempDir(), "ramdisk") - require.NoError(t, err) - - f, err := os.CreateTemp(t.TempDir(), "ramdisk_*") - require.NoError(t, err) - - err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M") - if errors.Is(err, unix.EPERM) { - t.Skipf("skip size tests: no permission to mount: %v", err) - return - } - require.NoError(t, err) - defer func() { - require.NoError(t, unix.Unmount(dir, 0)) - }() - - fst := New(WithPath(dir), WithDepth(1)) - require.NoError(t, fst.Open(mode.ComponentReadWrite)) - require.NoError(t, fst.Init()) - - _, err = fst.Put(context.Background(), common.PutPrm{ - RawData: make([]byte, 10<<20), - }) - require.ErrorIs(t, err, common.ErrNoSpace) -} diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go index 6f2ac87e1..7155ddcbb 100644 --- a/pkg/local_object_storage/blobstor/fstree/option.go +++ b/pkg/local_object_storage/blobstor/fstree/option.go @@ -4,6 +4,7 @@ import ( "io/fs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "go.uber.org/zap" ) type Option func(*FSTree) @@ -52,6 +53,6 @@ func WithFileCounter(c FileCounter) Option { func WithLogger(l *logger.Logger) Option { return func(f *FSTree) { - f.log = l + f.log = l.With(zap.String("component", "FSTree")) } } diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index d54c54f59..36b2c33f8 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -3,7 +3,6 @@ package blobstortest import ( "context" "errors" - "slices" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -27,7 +26,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) { _, err := s.Delete(context.Background(), delPrm) require.NoError(t, err) - objects = slices.Delete(objects, delID, delID+1) + objects = append(objects[:delID], objects[delID+1:]...) 
runTestNormalHandler(t, s, objects) @@ -50,7 +49,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) _, err := s.Iterate(context.Background(), iterPrm) require.NoError(t, err) - require.Len(t, objects, len(seen)) + require.Equal(t, len(objects), len(seen)) for i := range objects { d, ok := seen[objects[i].addr.String()] require.True(t, ok) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 2786321a8..ccfa510fe 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -8,7 +8,6 @@ import ( "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -25,9 +24,7 @@ func TestIterateObjects(t *testing.T) { // create BlobStor instance blobStor := New( WithStorages(defaultStorages(p, smalSz)), - WithCompression(compression.Config{ - Enabled: true, - }), + WithCompressObjects(true), ) defer os.RemoveAll(p) diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 3df96a1c3..95a916662 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -16,7 +16,7 @@ func (s *memstoreImpl) Init() error func (s *memstoreImpl) Close(context.Context) error { return nil } func (s *memstoreImpl) Type() string { return Type } func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression } +func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go index 7ef7e37a4..3afef7d18 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore.go @@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common elem := common.IterationElement{ ObjectData: v, } - if err := elem.Address.DecodeString(k); err != nil { + if err := elem.Address.DecodeString(string(k)); err != nil { if req.IgnoreErrors { continue } - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err)) + return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err)) } var err error if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil { diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go index 7605af4e5..97a03993d 100644 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ b/pkg/local_object_storage/blobstor/memstore/option.go @@ -7,7 +7,7 @@ import ( type 
cfg struct { rootPath string readOnly bool - compression *compression.Compressor + compression *compression.Config } func defaultConfig() *cfg { diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go index f28816555..2a6b94789 100644 --- a/pkg/local_object_storage/blobstor/rebuild.go +++ b/pkg/local_object_storage/blobstor/rebuild.go @@ -13,14 +13,19 @@ type StorageIDUpdate interface { UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error } -func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error { +type ConcurrentWorkersLimiter interface { + AcquireWorkSlot(ctx context.Context) error + ReleaseWorkSlot() +} + +func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error { var summary common.RebuildRes var rErr error for _, storage := range b.storage { res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{ - MetaStorage: upd, - Limiter: concLimiter, - FillPercent: fillPercent, + MetaStorage: upd, + WorkerLimiter: limiter, + FillPercent: fillPercent, }) summary.FilesRemoved += res.FilesRemoved summary.ObjectsMoved += res.ObjectsMoved diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index 3a38ecf82..fb1188751 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -17,8 +17,8 @@ type cfg struct { Type func() string Path func() string - SetCompressor func(cc *compression.Compressor) - Compressor func() *compression.Compressor + SetCompressor func(cc *compression.Config) + Compressor func() *compression.Config SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) @@ -45,11 +45,11 @@ func WithClose(f func() error) Option { return func(c *cfg) { c func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } } func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } } -func WithSetCompressor(f func(*compression.Compressor)) Option { +func WithSetCompressor(f func(*compression.Config)) Option { return func(c *cfg) { c.overrides.SetCompressor = f } } -func WithCompressor(f func() *compression.Compressor) Option { +func WithCompressor(f func() *compression.Config) Option { return func(c *cfg) { c.overrides.Compressor = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index 190b6a876..626ba0023 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -116,7 +116,7 @@ func (s *TestStore) Path() string { } } -func (s *TestStore) SetCompressor(cc *compression.Compressor) { +func (s *TestStore) SetCompressor(cc *compression.Config) { s.mu.RLock() defer s.mu.RUnlock() switch { @@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Compressor) { } } -func (s *TestStore) Compressor() *compression.Compressor { +func (s *TestStore) Compressor() *compression.Config { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index e0617a832..24059a3f9 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -48,9 +48,8 @@ func (e 
*StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var csErr error - res, csErr = e.containerSize(ctx, prm) - return csErr + res, err = e.containerSize(ctx, prm) + return err }) return @@ -70,13 +69,12 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er return res.Size(), nil } -func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { - var res ContainerSizeRes - err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) - csRes, err := sh.ContainerSize(ctx, csPrm) + csRes, err := sh.Shard.ContainerSize(csPrm) if err != nil { e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) @@ -88,7 +86,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) return false }) - return res, err + return } // ListContainers returns the unique container IDs present in the engine objects. @@ -98,9 +96,8 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) defer elapsed("ListContainers", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var lcErr error - res, lcErr = e.listContainers(ctx) - return lcErr + res, err = e.listContainers(ctx) + return err }) return @@ -121,8 +118,8 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { uniqueIDs := make(map[string]cid.ID) - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { - res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { e.reportShardError(ctx, sh, "can't get list of containers", err) return false @@ -136,9 +133,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, } return false - }); err != nil { - return ListContainersRes{}, err - } + }) result := make([]cid.ID, 0, len(uniqueIDs)) for _, v := range uniqueIDs { diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index bf1649f6e..6a416cfd9 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -22,6 +22,10 @@ type shardInitError struct { // Open opens all StorageEngine's components. func (e *StorageEngine) Open(ctx context.Context) error { + return e.open(ctx) +} + +func (e *StorageEngine) open(ctx context.Context) error { e.mtx.Lock() defer e.mtx.Unlock() @@ -73,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { errCh := make(chan shardInitError, len(e.shards)) var eg errgroup.Group - if e.lowMem && e.anyShardRequiresRefill() { + if e.cfg.lowMem && e.anyShardRequiresRefill() { eg.SetLimit(1) } @@ -145,14 +149,20 @@ var errClosed = errors.New("storage engine is closed") func (e *StorageEngine) Close(ctx context.Context) error { close(e.closeCh) defer e.wg.Wait() - return e.closeEngine(ctx) + return e.setBlockExecErr(ctx, errClosed) } // closes all shards.
Never returns an error; shard errors are logged. -func (e *StorageEngine) closeAllShards(ctx context.Context) error { +func (e *StorageEngine) close(ctx context.Context, releasePools bool) error { e.mtx.RLock() defer e.mtx.RUnlock() + if releasePools { + for _, p := range e.shardPools { + p.Release() + } + } + for id, sh := range e.shards { if err := sh.Close(ctx); err != nil { e.log.Debug(ctx, logs.EngineCouldNotCloseShard, @@ -172,23 +182,70 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error { e.blockExec.mtx.RLock() defer e.blockExec.mtx.RUnlock() - if e.blockExec.closed { - return errClosed + if e.blockExec.err != nil { + return e.blockExec.err } return op() } -func (e *StorageEngine) closeEngine(ctx context.Context) error { +// sets the flag of blocking execution of all data operations according to err: +// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method +// (if err == errClosed => additionally releases pools and does not allow to resume executions). +// - otherwise, resumes execution. If exec was blocked, calls open method. +// +// Can be called concurrently with exec. In this case it waits for all executions to complete. +func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { e.blockExec.mtx.Lock() defer e.blockExec.mtx.Unlock() - if e.blockExec.closed { + prevErr := e.blockExec.err + + wasClosed := errors.Is(prevErr, errClosed) + if wasClosed { return errClosed } - e.blockExec.closed = true - return e.closeAllShards(ctx) + e.blockExec.err = err + + if err == nil { + if prevErr != nil { // block -> ok + return e.open(ctx) + } + } else if prevErr == nil { // ok -> block + return e.close(ctx, errors.Is(err, errClosed)) + } + + // otherwise do nothing + + return nil +} + +// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err. +// To resume the execution, use ResumeExecution method. +// +// Can be called regardless of any previous blocking. If execution wasn't blocked, releases all resources +// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions +// to complete). Returns error if any Close has been called before. +// +// Must not be called concurrently with either Open or Init. +// +// Note: technically, passing a nil error will resume the execution, but calling ResumeExecution is the +// recommended way to do that. +func (e *StorageEngine) BlockExecution(err error) error { + return e.setBlockExecErr(context.Background(), err) +} + +// ResumeExecution resumes the execution of any data-related operation. +// To block the execution, use BlockExecution method. +// +// Can be called regardless of any previous blocking. If execution was blocked, prepares all resources +// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions +// to complete). Returns error if any Close has been called before. +// +// Must not be called concurrently with either Open or Init.
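A short sketch of the block/resume contract documented above; errMaintenance and the maintenance helper are illustrative, and TestExecBlocks below exercises the same sequence:

func maintenance(e *StorageEngine) error {
	errMaintenance := errors.New("node maintenance") // hypothetical caller-defined reason
	if err := e.BlockExecution(errMaintenance); err != nil {
		return err
	}
	// Every data-related operation now fails fast with errMaintenance.

	if err := e.ResumeExecution(); err != nil {
		return err // fails only if Close was called in the meantime
	}
	// Data operations are served again.
	return nil
}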
+func (e *StorageEngine) ResumeExecution() error { + return e.setBlockExecErr(context.Background(), nil) } type ReConfiguration struct { diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index 4ff0ed5ec..c9efc312c 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -2,6 +2,7 @@ package engine import ( "context" + "errors" "fmt" "io/fs" "os" @@ -11,14 +12,17 @@ import ( "testing" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) @@ -159,6 +163,42 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O require.Equal(t, 1, shardCount) } +func TestExecBlocks(t *testing.T) { + e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many + + // put some object + obj := testutil.GenerateObjectWithCID(cidtest.ID()) + + addr := object.AddressOf(obj) + + require.NoError(t, Put(context.Background(), e, obj, false)) + + // block executions + errBlock := errors.New("block exec err") + + require.NoError(t, e.BlockExecution(errBlock)) + + // try to exec some op + _, err := Head(context.Background(), e, addr) + require.ErrorIs(t, err, errBlock) + + // resume executions + require.NoError(t, e.ResumeExecution()) + + _, err = Head(context.Background(), e, addr) // can be any data-related op + require.NoError(t, err) + + // close + require.NoError(t, e.Close(context.Background())) + + // try exec after close + _, err = Head(context.Background(), e, addr) + require.Error(t, err) + + // try to resume + require.Error(t, e.ResumeExecution()) +} + func TestPersistentShardID(t *testing.T) { dir := t.TempDir() @@ -205,6 +245,7 @@ func TestReload(t *testing.T) { // no new paths => no new shards require.Equal(t, shardNum, len(e.shards)) + require.Equal(t, shardNum, len(e.shardPools)) newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum)) @@ -216,6 +257,7 @@ func TestReload(t *testing.T) { require.NoError(t, e.Reload(context.Background(), rcfg)) require.Equal(t, shardNum+1, len(e.shards)) + require.Equal(t, shardNum+1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -235,6 +277,7 @@ func TestReload(t *testing.T) { // removed one require.Equal(t, shardNum-1, len(e.shards)) + require.Equal(t, shardNum-1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -268,6 +311,7 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str } require.Equal(t, num, len(e.shards)) + require.Equal(t, num, len(e.shardPools)) return e, currShards } diff --git 
a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 223cdbc48..65ccbdb9e 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -23,6 +24,9 @@ type DeletePrm struct { forceRemoval bool } +// DeleteRes groups the resulting values of the Delete operation. +type DeleteRes struct{} + // WithAddress is a Delete option to set the address of the object to delete. // // Option is required. @@ -47,7 +51,7 @@ func (p *DeletePrm) WithForceRemoval() { // NOTE: Marks any object to be deleted (despite any prohibitions // on operations with that object) if WithForceRemoval option has // been provided. -func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error { +func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete", trace.WithAttributes( attribute.String("address", prm.addr.EncodeToString()), @@ -56,12 +60,15 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error { defer span.End() defer elapsed("Delete", e.metrics.AddMethodDuration)() - return e.execIfNotBlocked(func() error { - return e.delete(ctx, prm) + err = e.execIfNotBlocked(func() error { + res, err = e.delete(ctx, prm) + return err }) + + return } -func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { +func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { var locked struct { is bool } @@ -71,7 +78,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { // Removal of a big object is done in multiple stages: // 1. Remove the parent object. If it is locked or already removed, return immediately. // 2. Otherwise, search for all objects with a particular SplitID and delete them too. - if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { var existsPrm shard.ExistsPrm existsPrm.Address = prm.addr @@ -116,22 +123,20 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { // If a parent object is removed we should set GC mark on each shard.
return splitInfo == nil - }); err != nil { - return err - } + }) if locked.is { - return new(apistatus.ObjectLocked) + return DeleteRes{}, new(apistatus.ObjectLocked) } if splitInfo != nil { - return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) + e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } - return nil + return DeleteRes{}, nil } -func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error { +func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { var fs objectSDK.SearchFilters fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID) @@ -144,12 +149,13 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo inhumePrm.ForceRemoval() } - return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(ctx, selectPrm) if err != nil { e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return false } @@ -160,7 +166,8 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) continue } } @@ -189,7 +196,8 @@ func (e *StorageEngine) deleteChunks( if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) continue } } diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index a56598c09..0dd2e94bb 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -70,7 +70,8 @@ func TestDeleteBigObject(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - require.NoError(t, e.Delete(context.Background(), deletePrm)) + _, err := e.Delete(context.Background(), deletePrm) + require.NoError(t, err) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -140,7 +141,8 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - require.NoError(t, e.Delete(context.Background(), deletePrm)) + _, err := e.Delete(context.Background(), deletePrm) + require.NoError(t, err) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -151,7 +153,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { // delete physical var delPrm shard.DeletePrm delPrm.SetAddresses(addrParent) - _, err := s1.Delete(context.Background(), delPrm) + _, err = s1.Delete(context.Background(), delPrm) require.NoError(t, err) delPrm.SetAddresses(addrLink) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 376d545d3..f82268d1d 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -12,8 +12,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ -28,13 +28,16 @@ type StorageEngine struct { shards map[string]hashedShard + shardPools map[string]util.WorkerPool + closeCh chan struct{} setModeCh chan setModeRequest wg sync.WaitGroup blockExec struct { - mtx sync.RWMutex - closed bool + mtx sync.RWMutex + + err error } evacuateLimiter *evacuationLimiter } @@ -173,10 +176,7 @@ func (e *StorageEngine) reportShardError( } func isLogical(err error) bool { - return errors.As(err, &logicerr.Logical{}) || - errors.Is(err, context.Canceled) || - errors.Is(err, context.DeadlineExceeded) || - errors.As(err, new(*apistatus.ResourceExhausted)) + return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) } // Option represents StorageEngine's constructor option. @@ -189,6 +189,8 @@ type cfg struct { metrics MetricRegister + shardPoolSize uint32 + lowMem bool containerSource atomic.Pointer[containerSource] @@ -196,8 +198,9 @@ type cfg struct { func defaultCfg() *cfg { res := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - metrics: noopMetrics{}, + log: logger.NewLoggerWrapper(zap.L()), + shardPoolSize: 20, + metrics: noopMetrics{}, } res.containerSource.Store(&containerSource{}) return res @@ -211,18 +214,13 @@ func New(opts ...Option) *StorageEngine { opts[i](c) } - evLimMtx := &sync.RWMutex{} - evLimCond := sync.NewCond(evLimMtx) - return &StorageEngine{ - cfg: c, - shards: make(map[string]hashedShard), - closeCh: make(chan struct{}), - setModeCh: make(chan setModeRequest), - evacuateLimiter: &evacuationLimiter{ - guard: evLimMtx, - statusCond: evLimCond, - }, + cfg: c, + shards: make(map[string]hashedShard), + shardPools: make(map[string]util.WorkerPool), + closeCh: make(chan struct{}), + setModeCh: make(chan setModeRequest), + evacuateLimiter: &evacuationLimiter{}, } } @@ -239,6 +237,13 @@ func WithMetrics(v MetricRegister) Option { } } +// WithShardPoolSize returns option to specify size of worker pool for each shard. +func WithShardPoolSize(sz uint32) Option { + return func(c *cfg) { + c.shardPoolSize = sz + } +} + // WithErrorThreshold returns an option to specify size amount of errors after which // shard is moved to read-only mode. 
func WithErrorThreshold(sz uint32) Option { @@ -274,7 +279,7 @@ func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) ( return true, nil } - wasRemoved, err := container.WasRemoved(ctx, s.cs, id) + wasRemoved, err := container.WasRemoved(s.cs, id) if err != nil { return false, err } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index fc6d9ee9c..926ff43f3 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -2,14 +2,9 @@ package engine import ( "context" - "fmt" "path/filepath" - "runtime/debug" - "strings" - "sync" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -60,6 +55,7 @@ func (te *testEngineWrapper) setShardsNumOpts( te.shardIDs[i] = shard.ID() } require.Len(t, te.engine.shards, num) + require.Len(t, te.engine.shardPools, num) return te } @@ -94,7 +90,6 @@ func testGetDefaultShardOptions(t testing.TB) []shard.Option { ), shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...), - shard.WithLimiter(&testQoSLimiter{t: t}), } } @@ -116,8 +111,7 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1), blobovniczatree.WithPermissions(0o700), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))), + blobovniczatree.WithLogger(test.NewLogger(t))), Policy: func(_ *objectSDK.Object, data []byte) bool { return uint64(len(data)) < smallSize }, @@ -157,78 +151,3 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes }, }, smallFileStorage, largeFileStorage } - -var _ qos.Limiter = (*testQoSLimiter)(nil) - -type testQoSLimiter struct { - t testing.TB - quard sync.Mutex - id int64 - readStacks map[int64][]byte - writeStacks map[int64][]byte -} - -func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} - -func (t *testQoSLimiter) Close() { - t.quard.Lock() - defer t.quard.Unlock() - - var sb strings.Builder - var seqN int - for _, stack := range t.readStacks { - seqN++ - sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack))) - } - for _, stack := range t.writeStacks { - seqN++ - sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack))) - } - require.True(t.t, seqN == 0, sb.String()) -} - -func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { - t.quard.Lock() - defer t.quard.Unlock() - - stack := debug.Stack() - - t.id++ - id := t.id - - if t.readStacks == nil { - t.readStacks = make(map[int64][]byte) - } - t.readStacks[id] = stack - - return func() { - t.quard.Lock() - defer t.quard.Unlock() - - delete(t.readStacks, id) - }, nil -} - -func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { - t.quard.Lock() - defer t.quard.Unlock() - - stack := debug.Stack() - - t.id++ - id := t.id - - if t.writeStacks == nil { - t.writeStacks = make(map[int64][]byte) - } - t.writeStacks[id] = stack - - return func() { - t.quard.Lock() - defer 
t.quard.Unlock() - - delete(t.writeStacks, id) - }, nil -} - -func (t *testQoSLimiter) SetParentID(string) {} diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 57029dd5f..d68a7e826 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -46,6 +46,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) var testShards [2]*testShard te := testNewEngine(t, + WithShardPoolSize(1), WithErrorThreshold(errThreshold), ). setShardsNumOpts(t, 2, func(id int) []shard.Option { diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index c08dfbf03..623f5c941 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "slices" "strings" "sync" "sync/atomic" @@ -15,6 +14,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -200,6 +201,11 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes { return res } +type pooledShard struct { + hashedShard + pool util.WorkerPool +} + var errMustHaveTwoShards = errors.New("must have at least 1 spare shard") // Evacuate moves data from one shard to the others. 
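Context for the pooledShard hunks below: this side of the diff pairs every shard with its own worker pool (sized via WithShardPoolSize, default 20) instead of passing plain hashedShard values around. A minimal Go sketch of that pairing — the WorkerPool interface and syncPool below are simplified stand-ins for illustration, not the real pkg/util implementation:

package main

import "fmt"

// WorkerPool is a stand-in for the pkg/util interface assumed by the
// engine code in this diff: Submit runs fn (possibly asynchronously)
// or returns an error when the pool cannot accept more work.
type WorkerPool interface {
	Submit(fn func()) error
}

// syncPool is a toy pool that runs tasks inline; the real engine uses
// a bounded pool created per shard with WithShardPoolSize.
type syncPool struct{}

func (syncPool) Submit(fn func()) error { fn(); return nil }

// pooledShard mirrors the pairing used in evacuate.go and put.go:
// every shard carries its own pool, so a slow shard cannot stall
// writes destined for the others.
type pooledShard struct {
	id   string
	pool WorkerPool
}

// putViaPool shows the putToShard call shape from put.go: submit the
// write to the shard's pool, then block on exitCh until it completes.
func putViaPool(sh pooledShard, write func()) error {
	exitCh := make(chan struct{})
	if err := sh.pool.Submit(func() {
		defer close(exitCh)
		write()
	}); err != nil {
		return err
	}
	<-exitCh
	return nil
}

func main() {
	sh := pooledShard{id: "shard-0", pool: syncPool{}}
	_ = putViaPool(sh, func() { fmt.Println("object written to", sh.id) })
}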
@@ -246,10 +252,11 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro } var mtx sync.RWMutex - copyShards := func() []hashedShard { + copyShards := func() []pooledShard { mtx.RLock() defer mtx.RUnlock() - t := slices.Clone(shards) + t := make([]pooledShard, len(shards)) + copy(t, shards) return t } eg.Go(func() error { @@ -260,7 +267,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro } func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) error { var err error ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", @@ -277,12 +284,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p }() e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.Stringer("scope", prm.Scope)) + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) err = e.getTotals(ctx, prm, shardsToEvacuate, res) if err != nil { e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, - zap.Stringer("scope", prm.Scope)) + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) return err } @@ -316,7 +323,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p } if err != nil { e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.Stringer("scope", prm.Scope)) + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) return err } @@ -382,7 +389,7 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha } func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, egContainer *errgroup.Group, egObject *errgroup.Group, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard", @@ -406,7 +413,7 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel } func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, egContainer *errgroup.Group, egObject *errgroup.Group, ) error { sh := shardsToEvacuate[shardID] @@ -419,7 +426,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context } egContainer.Go(func() error { var skip bool - c, err := e.containerSource.Load().cs.Get(ctx, cnt) + c, err := e.containerSource.Load().cs.Get(cnt) if err != nil { if client.IsErrContainerNotFound(err) { skip = true @@ -473,13 +480,14 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context err := sh.IterateOverContainers(ctx, cntPrm) if err != nil { cancel(err) - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", 
shardID), zap.Error(err), evacuationOperationLogField) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return err } func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, + getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) error { sh := shardsToEvacuate[shardID] shards := getShards() @@ -509,7 +517,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, } func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees", trace.WithAttributes( @@ -532,7 +540,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID), - evacuationOperationLogField) + evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) res.trEvacuated.Add(1) continue } @@ -542,26 +550,26 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err)) + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } if moved { e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote, zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID), zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK), - evacuationOperationLogField) + evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) res.trEvacuated.Add(1) } else if prm.IgnoreErrors { res.trFailed.Add(1) e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err)) + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err)) + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID) } } @@ -577,7 +585,7 @@ func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.S } func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate 
map[string]*shard.Shard, + prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) (bool, string, error) { target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate) if err != nil { @@ -647,15 +655,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar // findShardToEvacuateTree returns first shard according HRW or first shard with tree exists. func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID, - shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, -) (hashedShard, bool, error) { + shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +) (pooledShard, bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString())) - var result hashedShard + var result pooledShard var found bool for _, target := range shards { select { case <-ctx.Done(): - return hashedShard{}, false, ctx.Err() + return pooledShard{}, false, ctx.Err() default: } @@ -683,7 +691,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora return result, found, nil } -func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) { +func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) { e.mtx.RLock() defer e.mtx.RUnlock() @@ -713,15 +721,18 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) // We must have all shards, to have correct information about their // indexes in a sorted slice and set appropriate marks in the metabase. // Evacuated shard is skipped during put. - shards := make([]hashedShard, 0, len(e.shards)) + shards := make([]pooledShard, 0, len(e.shards)) for id := range e.shards { - shards = append(shards, e.shards[id]) + shards = append(shards, pooledShard{ + hashedShard: hashedShard(e.shards[id]), + pool: e.shardPools[id], + }) } return shards, nil } func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, + getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") defer span.End() @@ -745,7 +756,8 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI res.objFailed.Add(1) return nil } - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } @@ -766,14 +778,16 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) if err != nil { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } if moved { 
res.objEvacuated.Add(1) } else if prm.IgnoreErrors { res.objFailed.Add(1) - e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { return fmt.Errorf("object %s was not replicated", addr) } @@ -791,7 +805,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool { } func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, - shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, + shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, ) (bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString())) for j := range shards { @@ -804,14 +818,15 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { continue } - switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status { + switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status { case putToShardSuccess: res.objEvacuated.Add(1) e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard, zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr), - evacuationOperationLogField) + evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return true, nil case putToShardExists, putToShardRemoved: res.objSkipped.Add(1) diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index b75e8686d..1e6b9ccb1 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -3,7 +3,6 @@ package engine import ( "context" "fmt" - "slices" "sync" "time" @@ -95,7 +94,8 @@ func (s *EvacuationState) StartedAt() *time.Time { if s == nil { return nil } - if s.startedAt.IsZero() { + defaultTime := time.Time{} + if s.startedAt == defaultTime { return nil } return &s.startedAt @@ -105,7 +105,8 @@ func (s *EvacuationState) FinishedAt() *time.Time { if s == nil { return nil } - if s.finishedAt.IsZero() { + defaultTime := time.Time{} + if s.finishedAt == defaultTime { return nil } return &s.finishedAt @@ -122,7 +123,8 @@ func (s *EvacuationState) DeepCopy() *EvacuationState { if s == nil { return nil } - shardIDs := slices.Clone(s.shardIDs) + shardIDs := make([]string, len(s.shardIDs)) + copy(shardIDs, s.shardIDs) return &EvacuationState{ shardIDs: shardIDs, @@ -139,8 +141,7 @@ type evacuationLimiter struct { eg *errgroup.Group cancel context.CancelFunc - guard *sync.RWMutex - statusCond *sync.Cond // used in unit tests + guard sync.RWMutex } func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) { @@ -166,7 +167,6 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res startedAt: time.Now().UTC(), result: result, } - l.statusCond.Broadcast() return l.eg, egCtx, nil } @@ -182,7 +182,6 @@ func (l *evacuationLimiter) Complete(err error) { l.state.processState = 
EvacuateProcessStateCompleted l.state.errMessage = errMsq l.state.finishedAt = time.Now().UTC() - l.statusCond.Broadcast() l.eg = nil } @@ -217,7 +216,6 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error { l.state = EvacuationState{} l.eg = nil l.cancel = nil - l.statusCond.Broadcast() return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index f2ba7d994..248c39155 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -37,7 +37,7 @@ type containerStorage struct { latency time.Duration } -func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) { time.Sleep(cs.latency) v, ok := cs.cntmap[id] if !ok { @@ -49,7 +49,7 @@ func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer. return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } @@ -196,6 +196,7 @@ func TestEvacuateShardObjects(t *testing.T) { e.mtx.Lock() delete(e.shards, evacuateShardID) + delete(e.shardPools, evacuateShardID) e.mtx.Unlock() checkHasObjects(t) @@ -204,10 +205,11 @@ func TestEvacuateShardObjects(t *testing.T) { func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { var st *EvacuationState var err error - e.evacuateLimiter.waitForCompleted() - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err) - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + require.Eventually(t, func() bool { + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + return st.ProcessingStatus() == EvacuateProcessStateCompleted + }, 3*time.Second, 10*time.Millisecond) return st } @@ -403,8 +405,8 @@ func TestEvacuateSingleProcess(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan any) - running := make(chan any) + blocker := make(chan interface{}) + running := make(chan interface{}) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -445,8 +447,8 @@ func TestEvacuateObjectsAsync(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan any) - running := make(chan any) + blocker := make(chan interface{}) + running := make(chan interface{}) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -473,7 +475,7 @@ func TestEvacuateObjectsAsync(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") - st := testWaitForEvacuationCompleted(t, e) + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") return nil }) @@ -816,12 +818,3 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { t.Logf("evacuate took %v\n", time.Since(start)) require.NoError(t, err) } - -func (l *evacuationLimiter) waitForCompleted() { - l.guard.Lock() - defer l.guard.Unlock() - - for l.state.processState != 
EvacuateProcessStateCompleted { - l.statusCond.Wait() - } -} diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 7dac9eb97..9d2b1c1b7 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool exists := false locked := false - if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Exists(ctx, shPrm) if err != nil { if client.IsErrObjectAlreadyRemoved(err) { @@ -50,9 +50,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } return false - }); err != nil { - return false, false, err - } + }) if alreadyRemoved { return false, false, new(apistatus.ObjectAlreadyRemoved) diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 0694c53f3..81b027c26 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -78,9 +79,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { Engine: e, } - if err := it.tryGetWithMeta(ctx); err != nil { - return GetRes{}, err - } + it.tryGetWithMeta(ctx) if it.SplitInfo != nil { return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -99,9 +98,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } - if err := it.tryGetFromBlobstore(ctx); err != nil { - return GetRes{}, err - } + it.tryGetFromBlobstore(ctx) if it.Object == nil { return GetRes{}, it.OutError @@ -110,7 +107,8 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), zap.Error(it.MetaError), - zap.Stringer("address", prm.addr)) + zap.Stringer("address", prm.addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } @@ -137,8 +135,8 @@ type getShardIterator struct { ecInfoErr *objectSDK.ECInfoError } -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.ShardPrm.SetIgnoreMeta(noMeta) @@ -191,13 +189,13 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { }) } -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { +func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. 
If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already visited. return false diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index d436dd411..d6892f129 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -82,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) shPrm.SetAddress(prm.addr) shPrm.SetRaw(prm.raw) - if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold res, err := sh.Head(ctx, shPrm) if err != nil { @@ -123,9 +123,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) } head = res.Object() return true - }); err != nil { - return HeadRes{}, err - } + }) if head != nil { return HeadRes{head: head}, nil diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index e5f7072e2..bae784064 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -26,6 +27,9 @@ type InhumePrm struct { forceRemoval bool } +// InhumeRes encapsulates results of inhume operation. +type InhumeRes struct{} + // WithTarget sets a list of objects that should be inhumed and tombstone address // as the reason for inhume operation. // @@ -63,20 +67,23 @@ var errInhumeFailure = errors.New("inhume operation failed") // with that object) if WithForceRemoval option has been provided. // // Returns an error if executions are blocked (see BlockExecution). 
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error { +func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume") defer span.End() defer elapsed("Inhume", e.metrics.AddMethodDuration)() - return e.execIfNotBlocked(func() error { - return e.inhume(ctx, prm) + err = e.execIfNotBlocked(func() error { + res, err = e.inhume(ctx, prm) + return err }) + + return } -func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { - addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) +func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { + addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) if err != nil { - return err + return InhumeRes{}, err } var shPrm shard.InhumePrm @@ -84,6 +91,8 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { shPrm.ForceRemoval() } + var errLocked *apistatus.ObjectLocked + for shardID, addrs := range addrsPerShard { if prm.tombstone != nil { shPrm.SetTarget(*prm.tombstone, addrs...) @@ -96,112 +105,45 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Error(errors.New("this shard was expected to exist")), zap.String("shard_id", shardID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) - return errInhumeFailure + return InhumeRes{}, errInhumeFailure } if _, err := sh.Inhume(ctx, shPrm); err != nil { - e.reportInhumeError(ctx, err, sh) - return err + switch { + case errors.As(err, &errLocked): + case errors.Is(err, shard.ErrLockObjectRemoval): + case errors.Is(err, shard.ErrReadOnlyMode): + case errors.Is(err, shard.ErrDegradedMode): + default: + e.reportShardError(ctx, sh, "couldn't inhume object in shard", err) + } + return InhumeRes{}, err } } - return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm) -} - -func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) { - if err == nil { - return - } - - var errLocked *apistatus.ObjectLocked - switch { - case errors.As(err, &errLocked): - case errors.Is(err, shard.ErrLockObjectRemoval): - case errors.Is(err, shard.ErrReadOnlyMode): - case errors.Is(err, shard.ErrDegradedMode): - default: - e.reportShardError(ctx, hs, "couldn't inhume object in shard", err) - } -} - -// inhumeNotFoundObjects removes object which are not found on any shard. -// -// Besides an object not being found on any shard, it is also important to -// remove it anyway in order to populate the metabase indexes because they are -// responsible for the correct object status, i.e., the status will be `object -// not found` without the indexes, the status will be `object is already -// removed` with the indexes. -// -// It is suggested to evenly remove those objects on each shard with the batch -// size equal to 1 + floor(number of objects / number of shards). 
-func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error { - if len(addrs) == 0 { - return nil - } - - var shPrm shard.InhumePrm - if prm.forceRemoval { - shPrm.ForceRemoval() - } - - numObjectsPerShard := 1 + len(addrs)/len(e.shards) - - var inhumeErr error - itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { - numObjects := min(numObjectsPerShard, len(addrs)) - - if numObjects == 0 { - return true - } - - if prm.tombstone != nil { - shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...) - } else { - shPrm.MarkAsGarbage(addrs[:numObjects]...) - } - addrs = addrs[numObjects:] - - _, inhumeErr = hs.Inhume(ctx, shPrm) - e.reportInhumeError(ctx, inhumeErr, hs) - return inhumeErr != nil - }) - if inhumeErr != nil { - return inhumeErr - } - return itErr + return InhumeRes{}, nil } // groupObjectsByShard groups objects based on the shard(s) they are stored on. // // If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of // the objects are locked. -// -// Returns two sets of objects: found objects which are grouped per shard and -// not found object. Not found objects are objects which are not found on any -// shard. This can happen if a node is a container node but doesn't participate -// in a replica group of the object. -func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) { - groups = make(map[string][]oid.Address) +func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (map[string][]oid.Address, error) { + groups := make(map[string][]oid.Address) - var ids []string for _, addr := range addrs { - ids, err = e.findShards(ctx, addr, checkLocked) + ids, err := e.findShards(ctx, addr, checkLocked) if err != nil { - return + return nil, err } - - if len(ids) == 0 { - notFoundObjects = append(notFoundObjects, addr) - continue - } - for _, id := range ids { groups[id] = append(groups[id], addr) } } - return + return groups, nil } // findShards determines the shard(s) where the object is stored. @@ -224,7 +166,7 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL objectExists bool ) - if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { objectExists = false prm.Address = addr @@ -252,11 +194,16 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL default: } + if !objectExists { + return + } + if checkLocked { if isLocked, err := sh.IsLocked(ctx, addr); err != nil { e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, zap.Error(err), zap.Stringer("address", addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } else if isLocked { retErr = new(apistatus.ObjectLocked) @@ -264,20 +211,11 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL } } - // This exit point must come after checking if the object is locked, - // since the locked index may be populated even if the object doesn't - // exist. - if !objectExists { - return - } - ids = append(ids, sh.ID().String()) // Continue if it's a root object. 
return !isRootObject - }); err != nil { - return nil, err - } + }) if retErr != nil { return nil, retErr @@ -297,18 +235,17 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var err error var outErr error - if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { - locked, err = h.IsLocked(ctx, addr) + e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { + locked, err = h.Shard.IsLocked(ctx, addr) if err != nil { - e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err return false } return locked - }); err != nil { - return false, err - } + }) if locked { return locked, nil @@ -328,17 +265,16 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I var allLocks []oid.ID var outErr error - if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { - locks, err := h.GetLocks(ctx, addr) + e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { + locks, err := h.Shard.GetLocks(ctx, addr) if err != nil { - e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) + e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err } allLocks = append(allLocks, locks...) return false - }); err != nil { - return nil, err - } + }) if len(allLocks) > 0 { return allLocks, nil } @@ -346,23 +282,20 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) return true default: return false } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) - } + }) } func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { sh.HandleExpiredLocks(ctx, epoch, lockers) select { @@ -372,13 +305,11 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l default: return false } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) - } + }) } func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { sh.HandleDeletedLocks(ctx, lockers) select { @@ -388,25 +319,26 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A default: return false } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) - } + }) } func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { if len(ids) == 0 { return } + idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return 
} + if len(idMap) == 0 { return } + var failed bool var prm shard.ContainerSizePrm - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -418,7 +350,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid var drop []cid.ID for id := range idMap { prm.SetContainerID(id) - s, err := sh.ContainerSize(ctx, prm) + s, err := sh.ContainerSize(prm) if err != nil { e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true @@ -433,15 +365,13 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return len(idMap) == 0 - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) - return - } + }) + if failed || len(idMap) == 0 { return } - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -459,13 +389,12 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return false - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) - return - } + }) + if failed { return } + for id := range idMap { e.metrics.DeleteContainerSize(id.EncodeToString()) } @@ -475,16 +404,19 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci if len(ids) == 0 { return } + idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } + if len(idMap) == 0 { return } + var failed bool var prm shard.ContainerCountPrm - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -511,15 +443,13 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return len(idMap) == 0 - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) - return - } + }) + if failed || len(idMap) == 0 { return } - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -537,13 +467,12 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return false - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) - return - } + }) + if failed { return } + for id := range idMap { e.metrics.DeleteContainerCount(id.EncodeToString()) } diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 0e268cd23..2d083a58c 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -11,7 +11,6 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -56,7 +55,7 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) addrs, err := Select(context.Background(), e, cnr, false, fs) @@ -86,7 +85,7 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) addrs, err := Select(context.Background(), e, cnr, false, fs) @@ -129,7 +128,7 @@ func TestStorageEngine_ECInhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress) - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) var alreadyRemoved *apistatus.ObjectAlreadyRemoved @@ -174,7 +173,7 @@ func TestInhumeExpiredRegularObject(t *testing.T) { var prm InhumePrm prm.WithTarget(ts, object.AddressOf(obj)) - err := engine.Inhume(context.Background(), prm) + _, err := engine.Inhume(context.Background(), prm) require.NoError(t, err) }) @@ -183,7 +182,7 @@ func TestInhumeExpiredRegularObject(t *testing.T) { var prm InhumePrm prm.MarkAsGarbage(object.AddressOf(obj)) - err := engine.Inhume(context.Background(), prm) + _, err := engine.Inhume(context.Background(), prm) require.NoError(t, err) }) } @@ -206,7 +205,7 @@ func BenchmarkInhumeMultipart(b *testing.B) { func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { b.StopTimer() - engine := testNewEngine(b). + engine := testNewEngine(b, WithShardPoolSize(uint32(numObjects))). setShardsNum(b, numShards).prepare(b).engine defer func() { require.NoError(b, engine.Close(context.Background())) }() @@ -238,105 +237,8 @@ func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { prm.WithTarget(ts, addrs...) 
b.StartTimer() - err := engine.Inhume(context.Background(), prm) + _, err := engine.Inhume(context.Background(), prm) require.NoError(b, err) b.StopTimer() } } - -func TestInhumeIfObjectDoesntExist(t *testing.T) { - const numShards = 4 - - engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine - t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) - - t.Run("inhume without tombstone", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, false, false) - }) - t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, true, false) - }) - t.Run("force inhume", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, false, true) - }) - - t.Run("object is locked", func(t *testing.T) { - t.Run("inhume without tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, false, false) - }) - t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, true, false) - }) - t.Run("force inhume", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, false, true) - }) - }) -} - -func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { - t.Parallel() - - object := oidtest.Address() - require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce)) - - err := testHeadObject(e, object) - if withTombstone { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } -} - -func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { - t.Parallel() - - object := oidtest.Address() - require.NoError(t, testLockObject(e, object)) - - err := testInhumeObject(t, e, object, withTombstone, withForce) - if !withForce { - var errLocked *apistatus.ObjectLocked - require.ErrorAs(t, err, &errLocked) - return - } - require.NoError(t, err) - - err = testHeadObject(e, object) - if withTombstone { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } -} - -func testLockObject(e *StorageEngine, obj oid.Address) error { - return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()}) -} - -func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error { - tombstone := oidtest.Address() - tombstone.SetContainer(obj.Container()) - - // Due to the tests design it is possible to set both the options, - // however removal with tombstone and force removal are exclusive. 
- require.False(t, withTombstone && withForce) - - var inhumePrm InhumePrm - if withTombstone { - inhumePrm.WithTarget(tombstone, obj) - } else { - inhumePrm.MarkAsGarbage(obj) - } - if withForce { - inhumePrm.WithForceRemoval() - } - return e.Inhume(context.Background(), inhumePrm) -} - -func testHeadObject(e *StorageEngine, obj oid.Address) error { - var headPrm HeadPrm - headPrm.WithAddress(obj) - - _, err := e.Head(context.Background(), headPrm) - return err -} diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 3b0cf74f9..5d43e59df 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -41,19 +41,11 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) - if err != nil { - return err - } - switch st { + switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: - st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) - if err != nil { - return err - } - switch st { + switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: @@ -69,13 +61,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { // code is pretty similar to inhumeAddr, maybe unify? 
root := false var addrLocked oid.Address addrLocked.SetContainer(idCnr) addrLocked.SetObject(locked) - retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) { defer func() { // if object is root we continue since information about it // can be presented in other shards @@ -92,11 +84,17 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError if errors.As(err, &eiErr) { - eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) - if !ok { - return false + eclocked := []oid.ID{locked} + for _, chunk := range eiErr.ECInfo().Chunks { + var objID oid.ID + err = objID.ReadFromV2(chunk.ID) + if err != nil { + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) + return false + } + eclocked = append(eclocked, objID) } - err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), @@ -139,18 +137,3 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo }) return } - -func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err := objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return nil, false - } - eclocked = append(eclocked, objID) - } - return eclocked, true -} diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index b8c9d6b1d..7bb9e3934 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -114,7 +114,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) var objLockedErr *apistatus.ObjectLocked - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -127,7 +127,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombForLockAddr, lockerAddr) - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.ErrorIs(t, err, meta.ErrLockObjectRemoval) // 5. @@ -136,7 +136,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) require.Eventually(t, func() bool { - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -200,7 +200,7 @@ func TestLockExpiration(t *testing.T) { inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 3. 
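The lock.go hunk above restores a status-code flow for lockSingle: 0 means fail, 1 means an attempt to lock an irregular object, 2 means success, and a failed first pass with the existence check is retried without it. A hedged sketch of that control flow with stand-in types (the real method iterates the sorted shards and also handles split and EC errors along the way):

package main

import (
	"errors"
	"fmt"
)

// Status codes as documented in the lockSingle hunk above:
// 0 - fail, 1 - locking an irregular object, 2 - ok.
const (
	lockFail uint8 = iota
	lockIrregular
	lockOK
)

// lockOne is a stand-in for e.lockSingle; here it pretends the object
// is not found while the existence check is enabled.
func lockOne(id string, checkExists bool) uint8 {
	if checkExists {
		return lockFail
	}
	return lockOK
}

// lockAll mirrors the restored two-pass flow in lock(): try each ID
// with the existence check, and on failure retry without it before
// reporting the object as missing.
func lockAll(ids []string) error {
	for _, id := range ids {
		switch lockOne(id, true) {
		case lockIrregular:
			return errors.New("apistatus: lock non-regular object")
		case lockFail:
			switch lockOne(id, false) {
			case lockIrregular:
				return errors.New("apistatus: lock non-regular object")
			case lockFail:
				return fmt.Errorf("object %s not found", id)
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(lockAll([]string{"obj-1", "obj-2"}))
}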
@@ -212,7 +212,7 @@ func TestLockExpiration(t *testing.T) { inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) require.Eventually(t, func() bool { - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -270,12 +270,12 @@ func TestLockForceRemoval(t *testing.T) { inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -283,12 +283,13 @@ func TestLockForceRemoval(t *testing.T) { deletePrm.WithAddress(objectcore.AddressOf(lock)) deletePrm.WithForceRemoval() - require.NoError(t, e.Delete(context.Background(), deletePrm)) + _, err = e.Delete(context.Background(), deletePrm) + require.NoError(t, err) // 5. inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - err = e.Inhume(context.Background(), inhumePrm) + _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) } diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go index 963292d83..75936206d 100644 --- a/pkg/local_object_storage/engine/metrics.go +++ b/pkg/local_object_storage/engine/metrics.go @@ -7,12 +7,34 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) -type ( - MetricRegister = metrics.EngineMetrics - GCMetrics = metrics.GCMetrics - WriteCacheMetrics = metrics.WriteCacheMetrics - NullBool = metrics.NullBool -) +type MetricRegister interface { + AddMethodDuration(method string, d time.Duration) + + SetObjectCounter(shardID, objectType string, v uint64) + AddToObjectCounter(shardID, objectType string, delta int) + + SetMode(shardID string, mode mode.Mode) + + AddToContainerSize(cnrID string, size int64) + DeleteContainerSize(cnrID string) + DeleteContainerCount(cnrID string) + AddToPayloadCounter(shardID string, size int64) + IncErrorCounter(shardID string) + ClearErrorCounter(shardID string) + DeleteShardMetrics(shardID string) + + SetContainerObjectCounter(shardID, contID, objectType string, v uint64) + IncContainerObjectCounter(shardID, contID, objectType string) + SubContainerObjectCounter(shardID, contID, objectType string, v uint64) + + IncRefillObjectsCount(shardID, path string, size int, success bool) + SetRefillPercent(shardID, path string, percent uint32) + SetRefillStatus(shardID, path, status string) + SetEvacuationInProgress(shardID string, value bool) + + WriteCache() metrics.WriteCacheMetrics + GC() metrics.GCMetrics +} func elapsed(method string, addFunc func(method string, d time.Duration)) func() { t := time.Now() @@ -54,9 +76,9 @@ type ( ) var ( - _ MetricRegister = noopMetrics{} - _ WriteCacheMetrics = noopWriteCacheMetrics{} - _ GCMetrics = noopGCMetrics{} + _ MetricRegister = noopMetrics{} + _ metrics.WriteCacheMetrics = noopWriteCacheMetrics{} + _ metrics.GCMetrics = noopGCMetrics{} ) func (noopMetrics) AddMethodDuration(string, time.Duration) {} @@ -77,8 +99,8 @@ func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {} func (noopMetrics) SetRefillPercent(string, string, uint32) {} func (noopMetrics) SetRefillStatus(string, string, string) {} func (noopMetrics) SetEvacuationInProgress(string, 
bool) {} -func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} } -func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} } +func (noopMetrics) WriteCache() metrics.WriteCacheMetrics { return noopWriteCacheMetrics{} } +func (noopMetrics) GC() metrics.GCMetrics { return noopGCMetrics{} } func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {} func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {} diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 10cf5ffd5..c79b6e251 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -9,6 +9,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -96,19 +98,17 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } var shRes putToShardRes - if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() - _, ok := e.shards[sh.ID().String()] + pool, ok := e.shardPools[sh.ID().String()] e.mtx.RUnlock() if !ok { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) + shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown - }); err != nil { - return err - } + }) switch shRes.status { case putToShardUnknown: return errPutShard @@ -123,59 +123,73 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // putToShard puts object to sh. // Return putToShardStatus and error if it is necessary to propagate an error upper. 
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, +func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, ) (res putToShardRes) { - var existPrm shard.ExistsPrm - existPrm.Address = addr + exitCh := make(chan struct{}) - exists, err := sh.Exists(ctx, existPrm) - if err != nil { - if shard.IsErrObjectExpired(err) { - // object is already found but - // expired => do nothing with it + if err := pool.Submit(func() { + defer close(exitCh) + + var existPrm shard.ExistsPrm + existPrm.Address = addr + + exists, err := sh.Exists(ctx, existPrm) + if err != nil { + if shard.IsErrObjectExpired(err) { + // object is already found but + // expired => do nothing with it + res.status = putToShardExists + } else { + e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + } + + return // this is not ErrAlreadyRemoved error so we can go to the next shard + } + + if exists.Exists() { res.status = putToShardExists - } else { - e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - } - - return // this is not ErrAlreadyRemoved error so we can go to the next shard - } - - if exists.Exists() { - res.status = putToShardExists - return - } - - var putPrm shard.PutPrm - putPrm.SetObject(obj) - putPrm.SetIndexAttributes(isIndexedContainer) - - _, err = sh.Put(ctx, putPrm) - if err != nil { - if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || - errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - return - } - if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - res.status = putToShardRemoved - res.err = err return } - e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) - return + var putPrm shard.PutPrm + putPrm.SetObject(obj) + putPrm.SetIndexAttributes(isIndexedContainer) + + _, err = sh.Put(ctx, putPrm) + if err != nil { + if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || + errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + return + } + if client.IsErrObjectAlreadyRemoved(err) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + res.status = putToShardRemoved + res.err = err + return + } + + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) + return + } + + res.status = putToShardSuccess + }); err != nil { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err)) + close(exitCh) } - res.status = putToShardSuccess + <-exitCh return } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 7ec4742d8..600e7266c 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -9,6 +9,7 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -93,9 +94,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error Engine: e, } - if err := it.tryGetWithMeta(ctx); err != nil { - return RngRes{}, err - } + it.tryGetWithMeta(ctx) if it.SplitInfo != nil { return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -111,9 +110,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } - if err := it.tryGetFromBlobstor(ctx); err != nil { - return RngRes{}, err - } + it.tryGetFromBlobstor(ctx) if it.Object == nil { return RngRes{}, it.OutError @@ -122,7 +119,8 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), zap.Error(it.MetaError), - zap.Stringer("address", prm.addr)) + zap.Stringer("address", prm.addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } @@ -161,8 +159,8 @@ type getRangeShardIterator struct { Engine *StorageEngine } -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.HasDegraded = i.HasDegraded || noMeta i.ShardPrm.SetIgnoreMeta(noMeta) @@ -213,13 +211,13 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { }) } -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { +func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already processed it without a metabase. 
return false diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go index a29dd7ed9..83c6a54ed 100644 --- a/pkg/local_object_storage/engine/rebuild.go +++ b/pkg/local_object_storage/engine/rebuild.go @@ -4,7 +4,6 @@ import ( "context" "sync" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" @@ -42,7 +41,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } resGuard := &sync.Mutex{} - concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)} + limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit) eg, egCtx := errgroup.WithContext(ctx) for _, shardID := range prm.ShardIDs { @@ -62,7 +61,7 @@ } err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{ - ConcurrencyLimiter: concLimiter, + ConcurrencyLimiter: limiter, TargetFillPercent: prm.TargetFillPercent, }) @@ -89,20 +88,3 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } return res, nil } - -type concurrencyLimiter struct { - semaphore chan struct{} -} - -func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { - select { - case l.semaphore <- struct{}{}: - return l.releaseWorkSlot, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -func (l *concurrencyLimiter) releaseWorkSlot() { - <-l.semaphore -} diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 4243a5481..02149b4c8 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -54,9 +54,8 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var sErr error - res, sErr = e._select(ctx, prm) - return sErr + res, err = e._select(ctx, prm) + return err }) return @@ -66,11 +65,13 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) + var outError error + var shPrm shard.SelectPrm shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -85,13 +86,11 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, } return false - }); err != nil { - return SelectRes{}, err - } + }) return SelectRes{ addrList: addrList, - }, nil + }, outError } // List returns `limit` available physically stored object addresses in engine.
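The rebuild.go hunk above deletes the engine-local concurrencyLimiter in favor of shard.NewRebuildLimiter. For reference, a context-aware channel semaphore of the deleted shape can be sketched in isolation as follows; this restates the removed type with illustrative names and is not the shard package's implementation:

package main

import (
	"context"
	"fmt"
)

type releaseFunc func()

// limiter bounds concurrency with a buffered channel used as a semaphore.
type limiter struct{ sem chan struct{} }

func newLimiter(n int) *limiter { return &limiter{sem: make(chan struct{}, n)} }

// acquire blocks until a slot is free or the context is cancelled.
func (l *limiter) acquire(ctx context.Context) (releaseFunc, error) {
	select {
	case l.sem <- struct{}{}:
		return func() { <-l.sem }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	l := newLimiter(2)
	release, err := l.acquire(context.Background())
	if err != nil {
		fmt.Println("acquire failed:", err)
		return
	}
	defer release()
	fmt.Println("slot held; at most two holders run concurrently")
}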
@@ -101,9 +100,8 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var lErr error - res, lErr = e.list(ctx, limit) - return lErr + res, err = e.list(ctx, limit) + return err }) return @@ -115,7 +113,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro ln := uint64(0) // consider iterating over shuffled shards - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -134,9 +132,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro } return false - }); err != nil { - return SelectRes{}, err - } + }) return SelectRes{ addrList: addrList, diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 69067c500..6d4844b75 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -11,12 +11,10 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/hrw" "github.com/google/uuid" + "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) @@ -118,7 +116,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } - e.metrics.SetMode(sh.ID().String(), sh.GetMode()) + e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } @@ -180,6 +178,11 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { e.mtx.Lock() defer e.mtx.Unlock() + pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true)) + if err != nil { + return fmt.Errorf("create pool: %w", err) + } + strID := sh.ID().String() if _, ok := e.shards[strID]; ok { return fmt.Errorf("shard with id %s was already added", strID) @@ -193,6 +196,8 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { hash: hrw.StringHash(strID), } + e.shardPools[strID] = pool + return nil } @@ -217,6 +222,12 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { ss = append(ss, sh) delete(e.shards, id) + pool, ok := e.shardPools[id] + if ok { + pool.Release() + delete(e.shardPools, id) + } + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", id)) } @@ -261,7 +272,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) h := hrw.StringHash(objAddr.EncodeToString()) shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, sh) + shards = append(shards, hashedShard(sh)) } hrw.SortHasherSliceByValue(shards, h) return shards @@ -274,38 +285,26 @@ func (e *StorageEngine) unsortedShards() []hashedShard { shards := 
make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, sh) + shards = append(shards, hashedShard(sh)) } return shards } -func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { +func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) { for i, sh := range e.sortShards(addr) { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } if handler(i, sh) { break } } - return nil } -func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { +func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) { for _, sh := range e.unsortedShards() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } if handler(sh) { break } } - return nil } // SetShardMode sets mode of the shard with provided identifier. @@ -330,6 +329,8 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M // HandleNewEpoch notifies every shard about NewEpoch event. func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { + ev := shard.EventNewEpoch(epoch) + e.mtx.RLock() defer e.mtx.RUnlock() @@ -337,7 +338,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { select { case <-ctx.Done(): return - case sh.NotificationChannel() <- epoch: + case sh.NotificationChannel() <- ev: default: e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) @@ -425,6 +426,12 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha delete(e.shards, idStr) + pool, ok := e.shardPools[idStr] + if ok { + pool.Release() + delete(e.shardPools, idStr) + } + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } @@ -435,48 +442,3 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha func (s hashedShard) Hash() uint64 { return s.hash } - -func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) { - var err error - var info []shard.Info - prm := shard.ExistsPrm{ - Address: obj, - } - var siErr *objectSDK.SplitInfoError - var ecErr *objectSDK.ECInfoError - - if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { - res, exErr := hs.Exists(ctx, prm) - if exErr != nil { - if client.IsErrObjectAlreadyRemoved(exErr) { - err = new(apistatus.ObjectAlreadyRemoved) - return true - } - - // Check if error is either SplitInfoError or ECInfoError. - // True means the object is virtual. 
- if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) { - info = append(info, hs.DumpInfo()) - return false - } - - if shard.IsErrObjectExpired(exErr) { - err = exErr - return true - } - - if !client.IsErrObjectNotFound(exErr) { - e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address)) - } - - return false - } - if res.Exists() { - info = append(info, hs.DumpInfo()) - } - return false - }); itErr != nil { - return nil, itErr - } - return info, err -} diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index 3aa9629b0..0bbc7563c 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -17,6 +17,7 @@ func TestRemoveShard(t *testing.T) { e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() + require.Equal(t, numOfShards, len(e.shardPools)) require.Equal(t, numOfShards, len(e.shards)) removedNum := numOfShards / 2 @@ -36,6 +37,7 @@ func TestRemoveShard(t *testing.T) { } } + require.Equal(t, numOfShards-removedNum, len(e.shardPools)) require.Equal(t, numOfShards-removedNum, len(e.shards)) for id, removed := range mSh { diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index cfd15b4d4..268b4adfa 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.opentelemetry.io/otel/attribute" @@ -38,7 +39,8 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return nil, err @@ -71,7 +73,8 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return nil, err } @@ -99,7 +102,8 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return err } @@ -126,7 +130,8 @@ func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeI if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return err } @@ -157,7 
+162,8 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } @@ -189,7 +195,8 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } @@ -220,7 +227,8 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } @@ -230,7 +238,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree } // TreeSortedByFilename implements the pilorama.Forest interface. -func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { +func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename", trace.WithAttributes( attribute.String("container_id", cid.EncodeToString()), @@ -241,7 +249,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, var err error var nodes []pilorama.MultiNodeInfo - var cursor *pilorama.Cursor + var cursor *string for _, sh := range e.sortShards(cid) { nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) if err != nil { @@ -251,7 +259,8 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } @@ -282,7 +291,8 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } @@ -311,7 +321,8 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } @@ -339,7 +350,8 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, } e.reportShardError(ctx, sh, "can't perform `TreeList`", err, - zap.Stringer("cid", cid)) + zap.Stringer("cid", cid), + 
zap.String("trace_id", tracingPkg.GetTraceID(ctx))) // returns as much info about // trees as possible @@ -405,7 +417,8 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return err } @@ -431,7 +444,8 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't read tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID)) + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } continue } diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go index 52b199b0b..383c596af 100644 --- a/pkg/local_object_storage/internal/testutil/generators.go +++ b/pkg/local_object_storage/internal/testutil/generators.go @@ -1,9 +1,7 @@ package testutil import ( - cryptorand "crypto/rand" "encoding/binary" - "math/rand" "sync/atomic" "testing" @@ -11,6 +9,7 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" ) // AddressGenerator is the interface of types that generate object addresses. @@ -62,7 +61,7 @@ var _ ObjectGenerator = &SeqObjGenerator{} func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object { data := make([]byte, sz) - _, _ = cryptorand.Read(data) + _, _ = rand.Read(data) obj := GenerateObjectWithCIDWithPayload(cid, data) obj.SetID(oid) return obj @@ -83,7 +82,7 @@ var _ ObjectGenerator = &RandObjGenerator{} func (g *RandObjGenerator) Next() *objectSDK.Object { var id oid.ID - _, _ = cryptorand.Read(id[:]) + _, _ = rand.Read(id[:]) return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) } diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go index 1087e40be..60e9211d5 100644 --- a/pkg/local_object_storage/internal/testutil/object.go +++ b/pkg/local_object_storage/internal/testutil/object.go @@ -1,7 +1,6 @@ package testutil import ( - "crypto/rand" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -12,6 +11,7 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" + "golang.org/x/exp/rand" ) const defaultDataSize = 32 diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go deleted file mode 100644 index de1479e6f..000000000 --- a/pkg/local_object_storage/metabase/bucket_cache.go +++ /dev/null @@ -1,82 +0,0 @@ -package meta - -import ( - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.etcd.io/bbolt" -) - -type bucketCache struct { - locked *bbolt.Bucket - graveyard *bbolt.Bucket - garbage *bbolt.Bucket - expired map[cid.ID]*bbolt.Bucket - primary map[cid.ID]*bbolt.Bucket -} - -func newBucketCache() *bucketCache { - return &bucketCache{} -} - -func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { - if bc == nil { - return 
tx.Bucket(bucketNameLocked) - } - return getBucket(&bc.locked, tx, bucketNameLocked) -} - -func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { - if bc == nil { - return tx.Bucket(graveyardBucketName) - } - return getBucket(&bc.graveyard, tx, graveyardBucketName) -} - -func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { - if bc == nil { - return tx.Bucket(garbageBucketName) - } - return getBucket(&bc.garbage, tx, garbageBucketName) -} - -func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket { - if *cache != nil { - return *cache - } - - *cache = tx.Bucket(name) - return *cache -} - -func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { - if bc == nil { - bucketName := make([]byte, bucketKeySize) - bucketName = objectToExpirationEpochBucketName(cnr, bucketName) - return tx.Bucket(bucketName) - } - return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr) -} - -func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { - if bc == nil { - bucketName := make([]byte, bucketKeySize) - bucketName = primaryBucketName(cnr, bucketName) - return tx.Bucket(bucketName) - } - return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr) -} - -func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { - value, ok := (*m)[cnr] - if ok { - return value - } - - if *m == nil { - *m = make(map[cid.ID]*bbolt.Bucket, 1) - } - - bucketName := make([]byte, bucketKeySize) - bucketName = nameFunc(cnr, bucketName) - (*m)[cnr] = getBucket(&value, tx, bucketName) - return value -} diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go index da27e6085..472b2affc 100644 --- a/pkg/local_object_storage/metabase/containers.go +++ b/pkg/local_object_storage/metabase/containers.go @@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) { return result, err } -func (db *DB) ContainerSize(id cid.ID) (uint64, error) { +func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -64,22 +64,21 @@ func (db *DB) ContainerSize(id cid.ID) (uint64, error) { return 0, ErrDegradedMode } - var size uint64 - err := db.boltDB.View(func(tx *bbolt.Tx) error { - size = db.containerSize(tx, id) + err = db.boltDB.View(func(tx *bbolt.Tx) error { + size, err = db.containerSize(tx, id) - return nil + return err }) return size, metaerr.Wrap(err) } -func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 { +func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) { containerVolume := tx.Bucket(containerVolumeBucketName) key := make([]byte, cidSize) id.Encode(key) - return parseContainerSize(containerVolume.Get(key)) + return parseContainerSize(containerVolume.Get(key)), nil } func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool { diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go index 732f99519..f29dafe77 100644 --- a/pkg/local_object_storage/metabase/counter.go +++ b/pkg/local_object_storage/metabase/counter.go @@ -251,13 +251,13 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { return db.incContainerObjectCounter(tx, cnrID, isUserObject) } -func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error { +func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ 
objectType, delta uint64, inc bool) error { b := tx.Bucket(shardInfoBucket) if b == nil { return nil } - return db.updateShardObjectCounterBucket(b, typ, delta, false) + return db.updateShardObjectCounterBucket(b, typ, delta, inc) } func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error { diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 9a5a6e574..00ee2baa3 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -161,21 +161,21 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error { if res.phyCount > 0 { - err := db.decShardObjectCounter(tx, phy, res.phyCount) + err := db.updateShardObjectCounter(tx, phy, res.phyCount, false) if err != nil { return fmt.Errorf("decrease phy object counter: %w", err) } } if res.logicCount > 0 { - err := db.decShardObjectCounter(tx, logical, res.logicCount) + err := db.updateShardObjectCounter(tx, logical, res.logicCount, false) if err != nil { return fmt.Errorf("decrease logical object counter: %w", err) } } if res.userCount > 0 { - err := db.decShardObjectCounter(tx, user, res.userCount) + err := db.updateShardObjectCounter(tx, user, res.userCount, false) if err != nil { return fmt.Errorf("decrease user object counter: %w", err) } @@ -363,12 +363,12 @@ func (db *DB) deleteObject( func parentLength(tx *bbolt.Tx, addr oid.Address) int { bucketName := make([]byte, bucketKeySize) - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) + bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:])) if bkt == nil { return 0 } - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) + lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:]))) if err != nil { return 0 } @@ -376,12 +376,11 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int { return len(lst) } -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { +func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) { bkt := tx.Bucket(item.name) if bkt != nil { - return bkt.Delete(item.key) + _ = bkt.Delete(item.key) // ignore error, best effort there } - return nil } func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -406,16 +405,19 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { // if list empty, remove the key from bucket if len(lst) == 0 { - return bkt.Delete(item.key) + _ = bkt.Delete(item.key) // ignore error, best effort there + + return nil } // if list is not empty, then update it encodedLst, err := encodeList(lst) if err != nil { - return err + return nil // ignore error, best effort there } - return bkt.Put(item.key, encodedLst) + _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there + return nil } func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -478,47 +480,35 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error return ErrUnknownObjectType } - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: bucketName, key: objKey, - }); err != nil { - return err - } + }) } else { - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: parentBucketName(cnr, bucketName), key: objKey, - }); err != nil { - return err - } + }) } - if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from 
storage id index + delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index name: smallBucketName(cnr, bucketName), key: objKey, - }); err != nil { - return err - } - if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index + }) + delUniqueIndexItem(tx, namedBucketItem{ // remove from root index name: rootBucketName(cnr, bucketName), key: objKey, - }); err != nil { - return err - } + }) if expEpoch, ok := hasExpirationEpoch(obj); ok { - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: expEpochToObjectBucketName, key: expirationEpochKey(expEpoch, cnr, addr.Object()), - }); err != nil { - return err - } - if err := delUniqueIndexItem(tx, namedBucketItem{ + }) + delUniqueIndexItem(tx, namedBucketItem{ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), key: objKey, - }); err != nil { - return err - } + }) } return nil @@ -545,12 +535,10 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. // also drop EC parent root info if current EC chunk is the last one if !hasAnyChunks { - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }); err != nil { - return err - } + }) } if ech.ParentSplitParentID() == nil { @@ -584,10 +572,11 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. } // drop split info - return delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), }) + return nil } func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 7bd6f90a6..3133c5480 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "fmt" "time" @@ -153,16 +154,12 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE // - 2 if object is covered with tombstone; // - 3 if object is expired. 
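A recurring change in the metabase hunks that follow is wrapping values read from bbolt in bytes.Clone before unmarshalling. The rationale, per bbolt's documented contract, is that slices returned by Get alias memory that is only valid for the life of the transaction. A small sketch of the safe pattern, with an illustrative database path, bucket, and key:

package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("bkt"))
		if err != nil {
			return err
		}
		return b.Put([]byte("k"), []byte("v"))
	})
	if err != nil {
		log.Fatal(err)
	}

	var kept []byte
	err = db.View(func(tx *bolt.Tx) error {
		raw := tx.Bucket([]byte("bkt")).Get([]byte("k"))
		// raw aliases the mmap'd page; clone before letting it escape the tx.
		kept = bytes.Clone(raw)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("kept %q after the transaction closed\n", kept)
}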
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { - return objectStatusWithCache(nil, tx, addr, currEpoch) -} - -func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { // locked object could not be removed/marked with GC/expired - if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) { + if objectLocked(tx, addr.Container(), addr.Object()) { return 0, nil } - expired, err := isExpiredWithCache(bc, tx, addr, currEpoch) + expired, err := isExpired(tx, addr, currEpoch) if err != nil { return 0, err } @@ -171,8 +168,8 @@ func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, curr return 3, nil } - graveyardBkt := getGraveyardBucket(bc, tx) - garbageBkt := getGarbageBucket(bc, tx) + graveyardBkt := tx.Bucket(graveyardBucketName) + garbageBkt := tx.Bucket(garbageBucketName) addrKey := addressKey(addr, make([]byte, addressKeySize)) return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil } @@ -230,7 +227,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e splitInfo := objectSDK.NewSplitInfo() - err := splitInfo.Unmarshal(rawSplitInfo) + err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo)) if err != nil { return nil, fmt.Errorf("unmarshal split info from root index: %w", err) } diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go index a1351cb6f..68144d8b1 100644 --- a/pkg/local_object_storage/metabase/expired.go +++ b/pkg/local_object_storage/metabase/expired.go @@ -74,11 +74,9 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A } func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - return isExpiredWithCache(nil, tx, addr, currEpoch) -} - -func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - b := getExpiredBucket(bc, tx, addr.Container()) + bucketName := make([]byte, bucketKeySize) + bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName) + b := tx.Bucket(bucketName) if b == nil { return false, nil } diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 821810c09..af274b245 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "fmt" "time" @@ -88,12 +89,8 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { } func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { - return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch) -} - -func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { if checkStatus { - st, err := objectStatusWithCache(bc, tx, addr, currEpoch) + st, err := objectStatus(tx, addr, currEpoch) if err != nil { return nil, err } @@ -113,13 +110,12 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key bucketName := make([]byte, bucketKeySize) // check in primary index - if b := getPrimaryBucket(bc, tx, cnr); b != nil { - if data := b.Get(key); len(data) != 0 { - return obj, obj.Unmarshal(data) - } + data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key) + if len(data) != 0 { + return obj, obj.Unmarshal(bytes.Clone(data)) } - data 
:= getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) + data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) if len(data) != 0 { return nil, getECInfoError(tx, cnr, data) } @@ -127,13 +123,13 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key // if not found then check in tombstone index data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(data) + return obj, obj.Unmarshal(bytes.Clone(data)) } // if not found then check in locker index data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(data) + return obj, obj.Unmarshal(bytes.Clone(data)) } // if not found then check if object is a virtual @@ -189,7 +185,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD child := objectSDK.New() - err = child.Unmarshal(data) + err = child.Unmarshal(bytes.Clone(data)) if err != nil { return nil, fmt.Errorf("unmarshal child with parent: %w", err) } @@ -223,7 +219,7 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) if len(objData) != 0 { obj := objectSDK.New() - if err := obj.Unmarshal(objData); err != nil { + if err := obj.Unmarshal(bytes.Clone(objData)); err != nil { return err } chunk := objectSDK.ECChunk{} diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 76018fb61..99fdec310 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -342,10 +342,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I } func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { - if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil { + if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil { return err } - if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil { + if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil { return err } diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 9cccd7dad..0d438e102 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "errors" "strconv" @@ -129,7 +130,7 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) e } return b.ForEach(func(k, v []byte) error { - if oid.Decode(k) == nil && obj.Unmarshal(v) == nil { + if oid.Decode(k) == nil && obj.Unmarshal(bytes.Clone(v)) == nil { return f(cid, oid, obj) } diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 2a0bd7f6a..375d1cb1a 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -139,7 +139,8 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, var containerID cid.ID var offset []byte - bc := newBucketCache() + graveyardBkt := tx.Bucket(graveyardBucketName) + garbageBkt := tx.Bucket(garbageBucketName) rawAddr := make([]byte, cidSize, addressKeySize) @@ -168,7 +169,7 @@ loop: bkt := tx.Bucket(name) if bkt != nil { copy(rawAddr, cidRaw) - result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID, 
+ result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID, result, count, cursor, threshold, currEpoch) if err != nil { return nil, nil, err @@ -187,7 +188,8 @@ loop: if offset != nil { // new slice is much faster but less memory efficient // we need to copy, because offset exists during bbolt tx - cursor.inBucketOffset = bytes.Clone(offset) + cursor.inBucketOffset = make([]byte, len(offset)) + copy(cursor.inBucketOffset, offset) } if len(result) == 0 { @@ -196,17 +198,17 @@ loop: // new slice is much faster but less memory efficient // we need to copy, because bucketName exists during bbolt tx - cursor.bucketName = bytes.Clone(bucketName) + cursor.bucketName = make([]byte, len(bucketName)) + copy(cursor.bucketName, bucketName) return result, cursor, nil } // selectNFromBucket is similar to selectAllFromBucket but uses cursor to find // object to start selecting from. Ignores inhumed objects. -func selectNFromBucket( - bc *bucketCache, - bkt *bbolt.Bucket, // main bucket +func selectNFromBucket(bkt *bbolt.Bucket, // main bucket objType objectSDK.Type, // type of the objects stored in the main bucket + graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets cidRaw []byte, // container ID prefix, optimization cnt cid.ID, // container ID to []objectcore.Info, // listing result @@ -219,6 +221,7 @@ func selectNFromBucket( cursor = new(Cursor) } + count := len(to) c := bkt.Cursor() k, v := c.First() @@ -230,7 +233,7 @@ } for ; k != nil; k, v = c.Next() { - if len(to) >= limit { + if count >= limit { break } @@ -240,19 +243,17 @@ } offset = k - graveyardBkt := getGraveyardBucket(bc, bkt.Tx()) - garbageBkt := getGarbageBucket(bc, bkt.Tx()) if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { continue } var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { + if err := o.Unmarshal(bytes.Clone(v)); err != nil { return nil, nil, nil, err } expEpoch, hasExpEpoch := hasExpirationEpoch(&o) - if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) { + if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch { continue } @@ -274,6 +275,7 @@ a.SetContainer(cnt) a.SetObject(obj) to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}) + count++ } return to, offset, cursor, nil @@ -421,7 +423,7 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, p var ecInfo *objectcore.ECInfo if prm.ObjectType == objectSDK.TypeRegular { var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { + if err := o.Unmarshal(bytes.Clone(v)); err != nil { return err } isLinkingObj = isLinkObject(&o) diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 02985991c..817b22010 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -59,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { - if !errors.Is(err, meta.ErrEndOfListing) { + if !errors.Is(err, meta.ErrEndOfListing) { b.Fatalf("error: %v", err) } prm.SetCursor(nil) diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index f4cb9e53b..b930a0141 --- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go @@ -4,10 +4,8 @@ import ( "bytes" "context" "fmt" - "slices" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -64,7 +62,9 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid. return ErrReadOnlyMode } - assert.False(len(locked) == 0, "empty locked list") + if len(locked) == 0 { + panic("empty locked list") + } err := db.lockInternal(locked, cnr, locker) success = err == nil @@ -162,11 +162,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { // checks if specified object is locked in the specified container. func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - return objectLockedWithCache(nil, tx, idCnr, idObj) -} - -func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - bucketLocked := getLockedBucket(bc, tx) + bucketLocked := tx.Bucket(bucketNameLocked) if bucketLocked != nil { key := make([]byte, cidSize) idCnr.Encode(key) @@ -254,7 +250,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres unlockedObjects = append(unlockedObjects, addr) } else { // exclude locker - keyLockers = slices.Delete(keyLockers, i, i+1) + keyLockers = append(keyLockers[:i], keyLockers[i+1:]...) v, err = encodeList(keyLockers) if err != nil { diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 5e1bbfe9e..16918c4d9 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "encoding/binary" "errors" @@ -319,7 +320,7 @@ func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName [] return si.Marshal() default: oldSI := objectSDK.NewSplitInfo() - if err := oldSI.Unmarshal(old); err != nil { + if err := oldSI.Unmarshal(bytes.Clone(old)); err != nil { return nil, err } si = util.MergeSplitInfo(si, oldSI) diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 5f0956f0b..45faecc13 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -37,7 +37,7 @@ func TestResetDropsContainerBuckets(t *testing.T) { for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) - putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx)) + putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) } diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 60da50671..9f1b8b060 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -131,7 +131,6 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters res := make([]oid.Address, 0, len(mAddr)) - bc := newBucketCache() for a, ind := range mAddr { if ind != expLen { continue // ignore objects with unmatched fast filters @@ -146,7 +145,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters var addr oid.Address addr.SetContainer(cnr) addr.SetObject(id) - st, err := objectStatusWithCache(bc, tx, addr, currEpoch) + st, err 
:= objectStatus(tx, addr, currEpoch) if err != nil { return nil, err } @@ -154,7 +153,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters continue // ignore removed objects } - addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch) + addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) if !match { continue // ignore objects with unmatched slow filters } @@ -452,13 +451,13 @@ func (db *DB) selectObjectID( } // matchSlowFilters returns true if object header is matched by all slow filters. -func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { +func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { result := addr if len(f) == 0 { return result, true } - obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch) + obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch) if err != nil { return result, false } @@ -516,9 +515,9 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, return result, true } -func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { +func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { buf := make([]byte, addressKeySize) - obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch) + obj, err := db.get(tx, addr, buf, true, false, currEpoch) if err != nil { var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { @@ -528,7 +527,7 @@ func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Ad continue } addr.SetObject(objID) - obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch) + obj, err = db.get(tx, addr, buf, true, false, currEpoch) if err == nil { return obj, true, nil } diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index ce2156d2e..5cc998311 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -1216,8 +1216,6 @@ func TestExpiredObjects(t *testing.T) { } func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) { - b.ReportAllocs() - var prm meta.SelectPrm prm.SetContainerID(cid) prm.SetFilters(fs) diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go index 8f2376503..6d620b41a 100644 --- a/pkg/local_object_storage/metabase/storage_id.go +++ b/pkg/local_object_storage/metabase/storage_id.go @@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte { // StorageID returns storage descriptor for objects from the blobstor. // It is stored together with the object and makes get/delete operations faster.
-func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) { +func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) { var ( startedAt = time.Now() success = false @@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, er db.modeMtx.RLock() defer db.modeMtx.RUnlock() - var res StorageIDRes if db.mode.NoMetabase() { return res, ErrDegradedMode } - err := db.boltDB.View(func(tx *bbolt.Tx) error { - res.id = db.storageID(tx, prm.addr) - return nil + err = db.boltDB.View(func(tx *bbolt.Tx) error { + res.id, err = db.storageID(tx, prm.addr) + + return err }) success = err == nil return res, metaerr.Wrap(err) } -func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte { +func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) { key := make([]byte, bucketKeySize) smallBucket := tx.Bucket(smallBucketName(addr.Container(), key)) if smallBucket == nil { - return nil + return nil, nil } storageID := smallBucket.Get(objectKey(addr.Object(), key)) if storageID == nil { - return nil + return nil, nil } - return bytes.Clone(storageID) + return bytes.Clone(storageID), nil } // UpdateStorageIDPrm groups the parameters of UpdateStorageID operation. diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index 4948f3424..6eba58c69 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -360,7 +360,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv return nil } last = keys[len(keys)-1] - cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys) + cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys) if err != nil { log("deleting user attribute buckets completed with an error:", err) return err @@ -376,8 +376,8 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv } } -func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { - keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs) +func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { + keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs) if err != nil { return 0, fmt.Errorf("select non indexed user attributes: %w", err) } @@ -394,7 +394,7 @@ func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs co return uint64(len(keysToDrop)), nil } -func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) { +func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) { var keysToDrop [][]byte for _, key := range keys { attr, ok := attributeFromAttributeBucket(key) @@ -409,7 +409,7 @@ func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs contai if !ok { return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) } - info, err := cs.Info(ctx, contID) + info, err := cs.Info(contID) if err != nil { return nil, err } diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index c90de4dd6..5444264be 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -45,7 +45,7 @@ func TestUpgradeV2ToV3(t *testing.T) { type 
testContainerInfoProvider struct{} -func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) { +func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) { return container.Info{}, nil } diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 4ad83332b..80851f1c4 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -279,7 +278,9 @@ func objectKey(obj oid.ID, key []byte) []byte { // // firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") + if len(objs) == 0 { + panic("empty object list in firstIrregularObjectType") + } var keys [2][1 + cidSize]byte diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 897b37ea0..86b19e3af 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -419,7 +419,10 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri return err } - i, node := t.getPathPrefix(bTree, attr, path) + i, node, err := t.getPathPrefix(bTree, attr, path) + if err != nil { + return err + } ts := t.getLatestTimestamp(bLog, d.Position, d.Size) lm = make([]Move, len(path)-i+1) @@ -977,7 +980,10 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st b := treeRoot.Bucket(dataBucket) - i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) + i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) + if err != nil { + return err + } if i < len(path)-1 { return nil } @@ -1077,7 +1083,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol } // TreeSortedByFilename implements the Forest interface. 
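The TreeSortedByFilename signature change that follows replaces the pilorama Cursor with a plain *string carrying the last filename returned; callers page by feeding that pointer back in. A sketch of the resulting calling convention against a deliberately simplified, hypothetical interface (container, tree, and node parameters omitted; all names here are stand-ins, not the pilorama API):

package sketch

import "context"

// multiNodeInfo stands in for pilorama.MultiNodeInfo.
type multiNodeInfo struct{ Filename string }

// sortedForest is a simplified, hypothetical slice of the Forest interface.
type sortedForest interface {
	TreeSortedByFilename(ctx context.Context, last *string, count int) ([]multiNodeInfo, *string, error)
}

// collectAll pages through the sorted listing: nil starts from the beginning,
// and each call resumes after the filename pointed to by the returned cursor.
func collectAll(ctx context.Context, f sortedForest) ([]multiNodeInfo, error) {
	var (
		all    []multiNodeInfo
		cursor *string
	)
	for {
		page, next, err := f.TreeSortedByFilename(ctx, cursor, 100)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if len(page) == 0 || next == nil {
			return all, nil
		}
		cursor = next
	}
}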
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { +func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) { var ( startedAt = time.Now() success = false @@ -1155,7 +1161,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = NewCursor(s, res[len(res)-1].LastChild()) + last = &s } return res, last, metaerr.Wrap(err) } @@ -1166,10 +1172,10 @@ func sortByFilename(nodes []NodeInfo) { }) } -func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo { +func sortAndCut(result []NodeInfo, last *string) []NodeInfo { var lastBytes []byte if last != nil { - lastBytes = []byte(last.GetFilename()) + lastBytes = []byte(*last) } sortByFilename(result) @@ -1506,7 +1512,8 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* }) if len(res.Items) == batchSize { - res.NextPageToken = bytes.Clone(k) + res.NextPageToken = make([]byte, len(k)) + copy(res.NextPageToken, k) break } } @@ -1519,7 +1526,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* return &res, nil } -func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) { +func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) { c := bTree.Cursor() var curNodes []Node @@ -1542,14 +1549,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin } if len(nextNodes) == 0 { - return i, curNodes + return i, curNodes, nil } } - return len(path), nextNodes + return len(path), nextNodes, nil } -func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) { +func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) { c := bTree.Cursor() var curNode Node @@ -1569,10 +1576,10 @@ loop: childKey, value = c.Next() } - return i, curNode + return i, curNode, nil } - return len(path), curNode + return len(path), curNode, nil } func (t *boltForest) moveFromBytes(m *Move, data []byte) error { @@ -1582,12 +1589,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error { func (t *boltForest) logFromBytes(lm *Move, data []byte) error { lm.Child = binary.LittleEndian.Uint64(data) lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.FromBytes(data[16:]) + return lm.Meta.FromBytes(data[16:]) } func (t *boltForest) logToBytes(lm *Move) []byte { w := io.NewBufBinWriter() - size := 8 + 8 + lm.Size() + 1 + size := 8 + 8 + lm.Meta.Size() + 1 // if lm.HasOld { // size += 8 + lm.Old.Meta.Size() // } @@ -1595,7 +1602,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte { w.Grow(size) w.WriteU64LE(lm.Child) w.WriteU64LE(lm.Parent) - lm.EncodeBinary(w.BinWriter) + lm.Meta.EncodeBinary(w.BinWriter) // w.WriteBool(lm.HasOld) // if lm.HasOld { // w.WriteU64LE(lm.Old.Parent) diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index ebfd0bcc0..f31504e2b 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "slices" "sort" "strings" @@ -85,7 +84,8 @@ func (f *memoryForest) 
TreeAddByPath(_ context.Context, d CIDDescriptor, treeID s.operations = append(s.operations, op) } - mCopy := slices.Clone(m) + mCopy := make([]KeyValue, len(m)) + copy(mCopy, m) op := s.do(&Move{ Parent: node, Meta: Meta{ @@ -164,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, } // TreeSortedByFilename implements the Forest interface. -func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { +func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { @@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI var res []NodeInfo for _, nodeID := range nodeIDs { - children := s.getChildren(nodeID) + children := s.tree.getChildren(nodeID) for _, childID := range children { var found bool for _, kv := range s.infoMap[childID].Meta.Items { @@ -204,14 +204,17 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI r := mergeNodeInfos(res) for i := range r { - if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { - finish := min(len(res), i+count) + if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start { + finish := i + count + if len(res) < finish { + finish = len(res) + } last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], NewCursor(last, 0), nil + return r[i:finish], &last, nil } } last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, NewCursor(last, 0), nil + return nil, &last, nil } // TreeGetChildren implements the Forest interface. @@ -222,7 +225,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str return nil, ErrTreeNotFound } - children := s.getChildren(nodeID) + children := s.tree.getChildren(nodeID) res := make([]NodeInfo, 0, len(children)) for _, childID := range children { res = append(res, NodeInfo{ diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index 844084c55..de56fc82b 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { + treeAppend := func(t *testing.T, last *string, count int) *string { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) @@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { + treeAppend := func(t *testing.T, last *string, count int) *string { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) 
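The forest.go hunks above replace slices.Clone and the min builtin with hand-rolled equivalents, and the earlier TreeListTrees hunk does the same for bytes.Clone; these helpers are recent additions (bytes.Clone in Go 1.20, slices.Clone and min in Go 1.21), so the rewrites keep the package buildable on older toolchains. Copying the page token out also matters on its own, since bbolt keys are only valid inside their transaction. A small sketch of the replacement pattern (types are illustrative):

package main

import "fmt"

type KeyValue struct {
	Key   string
	Value []byte
}

// cloneKVs is the pre-Go-1.21 equivalent of slices.Clone: allocate a slice
// of the same length and copy the elements. Like slices.Clone, the copy is
// shallow, so nested []byte fields still share backing arrays.
func cloneKVs(src []KeyValue) []KeyValue {
	dst := make([]KeyValue, len(src))
	copy(dst, src)
	return dst
}

// intMin replaces the min builtin, which requires Go 1.21.
func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	orig := []KeyValue{{Key: "FileName", Value: []byte("a.txt")}}
	cp := cloneKVs(orig)
	orig[0].Key = "Version"   // element structs were copied by value,
	fmt.Println(cp[0].Key)    // so this still prints "FileName"
	fmt.Println(intMin(3, 7)) // 3
}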
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go index b035be1e1..5a00bcf7a 100644 --- a/pkg/local_object_storage/pilorama/heap.go +++ b/pkg/local_object_storage/pilorama/heap.go @@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any { // fixedHeap maintains a fixed number of smallest elements started at some point. type fixedHeap struct { - start *Cursor + start *string sorted bool count int h *filenameHeap } -func newHeap(start *Cursor, count int) *fixedHeap { +func newHeap(start *string, count int) *fixedHeap { h := new(filenameHeap) heap.Init(h) @@ -50,19 +50,8 @@ func newHeap(start *Cursor, count int) *fixedHeap { const amortizationMultiplier = 5 func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil { - if filename < h.start.GetFilename() { - return false - } else if filename == h.start.GetFilename() { - // A tree may have a lot of nodes with the same filename but different versions so that - // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call - // with the same filename. - pos := slices.Index(id, h.start.GetNode()) - if pos == -1 || pos+1 >= len(id) { - return false - } - id = id[pos+1:] - } + if h.start != nil && filename <= *h.start { + return false } *h.h = append(*h.h, heapInfo{id: id, filename: filename}) diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index 28b7faec8..ce7b3db1e 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree { // undo un-does op and changes s in-place. func (s *memoryTree) undo(op *move) { if op.HasOld { - s.infoMap[op.Child] = op.Old + s.tree.infoMap[op.Child] = op.Old } else { - delete(s.infoMap, op.Child) + delete(s.tree.infoMap, op.Child) } } @@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move { }, } - shouldPut := !s.isAncestor(op.Child, op.Parent) - p, ok := s.infoMap[op.Child] + shouldPut := !s.tree.isAncestor(op.Child, op.Parent) + p, ok := s.tree.infoMap[op.Child] if ok { lm.HasOld = true lm.Old = p @@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move { p.Meta = m p.Parent = op.Parent - s.infoMap[op.Child] = p + s.tree.infoMap[op.Child] = p return lm } @@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { } var nodes []Node - var lastTS Timestamp + var lastTs Timestamp children := t.getChildren(curNode) for i := range children { @@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { fileName := string(info.Meta.GetAttr(attr)) if fileName == path[len(path)-1] { if latest { - if info.Meta.Time >= lastTS { + if info.Meta.Time >= lastTs { nodes = append(nodes[:0], children[i]) } } else { diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index e1f6cd8e7..1f7e742a2 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -37,7 +37,7 @@ type Forest interface { TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. 
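fixedHeap above pages through filenames with a plain string cursor: candidates at or before the cursor are rejected, and only a bounded number of the smallest survivors are kept. A self-contained sketch of that select-the-next-page-without-a-full-sort idea over an in-memory slice (the nextPage helper is hypothetical; the real code feeds the heap while walking bbolt buckets):

package main

import (
	"container/heap"
	"fmt"
	"sort"
)

// maxHeap keeps its lexicographically largest string on top so the worst
// candidate can be evicted once the heap grows past the page size.
type maxHeap []string

func (h maxHeap) Len() int           { return len(h) }
func (h maxHeap) Less(i, j int) bool { return h[i] > h[j] }
func (h maxHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *maxHeap) Push(x any) { *h = append(*h, x.(string)) }

func (h *maxHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// nextPage returns up to count filenames strictly after the cursor, in
// ascending order, without sorting the whole input.
func nextPage(names []string, start *string, count int) []string {
	h := &maxHeap{}
	for _, name := range names {
		if start != nil && name <= *start { // same guard as fixedHeap.push
			continue
		}
		heap.Push(h, name)
		if h.Len() > count {
			heap.Pop(h) // evict the largest, keeping the count smallest
		}
	}
	res := []string(*h)
	sort.Strings(res)
	return res
}

func main() {
	cursor := "b"
	fmt.Println(nextPage([]string{"d", "a", "c", "b", "e"}, &cursor, 2)) // [c d]
}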
- TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) + TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) // TreeGetOpLog returns first log operation stored at or above the height. // In case no such operation is found, empty Move and nil error should be returned. TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) @@ -79,38 +79,6 @@ const ( AttributeVersion = "Version" ) -// Cursor keeps state between function calls for traversing nodes. -// It stores the attributes associated with a previous call, allowing subsequent operations -// to resume traversal from this point rather than starting from the beginning. -type Cursor struct { - // Last traversed filename. - filename string - - // Last traversed node. - node Node -} - -func NewCursor(filename string, node Node) *Cursor { - return &Cursor{ - filename: filename, - node: node, - } -} - -func (c *Cursor) GetFilename() string { - if c == nil { - return "" - } - return c.filename -} - -func (c *Cursor) GetNode() Node { - if c == nil { - return Node(0) - } - return c.node -} - // CIDDescriptor contains container ID and information about the node position // in the list of container nodes. type CIDDescriptor struct { diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go index 36d347f10..106ba6ae9 100644 --- a/pkg/local_object_storage/pilorama/multinode.go +++ b/pkg/local_object_storage/pilorama/multinode.go @@ -25,10 +25,6 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool { return true } -func (r *MultiNodeInfo) LastChild() Node { - return r.Children[len(r.Children)-1] -} - func (n NodeInfo) ToMultiNode() MultiNodeInfo { return MultiNodeInfo{ Children: MultiNode{n.ID}, diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go index eecee1527..54c2b90a6 100644 --- a/pkg/local_object_storage/pilorama/split_test.go +++ b/pkg/local_object_storage/pilorama/split_test.go @@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) { require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4")) require.Equal(t, []byte{10}, testGetByPath(t, "value0")) - testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) { + testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) { res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize) require.NoError(t, err) return res, last diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index b4015ae8d..0309f0c81 100644 --- a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 { return r.size } -func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { +func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { s.m.RLock() defer s.m.RUnlock() @@ -34,12 +34,6 @@ func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (Contai return ContainerSizeRes{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ContainerSizeRes{}, err - } - defer 
release() - size, err := s.metaBase.ContainerSize(prm.cnr) if err != nil { return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) @@ -75,12 +69,6 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont return ContainerCountRes{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ContainerCountRes{}, err - } - defer release() - counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) if err != nil { return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) @@ -112,12 +100,6 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.metaBase.DeleteContainerSize(ctx, id) } @@ -140,11 +122,5 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.metaBase.DeleteContainerCount(ctx, id) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index d489b8b0d..1c1933af5 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err err = s.SetMode(ctx, mode.DegradedReadOnly) if err != nil { - return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly) + return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly)) } return nil } @@ -108,17 +108,19 @@ func (s *Shard) Init(ctx context.Context) error { s.updateMetrics(ctx) s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - newEpochChan: make(chan uint64), - newEpochHandlers: &newEpochHandlers{ - cancelFunc: func() {}, - handlers: []newEpochHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, + gcCfg: &s.gcCfg, + remover: s.removeGarbage, + stopChannel: make(chan struct{}), + eventChan: make(chan Event), + mEventHandler: map[eventType]*eventHandlers{ + eventNewEpoch: { + cancelFunc: func() {}, + handlers: []eventHandler{ + s.collectExpiredLocks, + s.collectExpiredObjects, + s.collectExpiredTombstones, + s.collectExpiredMetrics, + }, }, }, } @@ -214,8 +216,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - if s.refillMetabaseWorkersCount > 0 { - eg.SetLimit(s.refillMetabaseWorkersCount) + if s.cfg.refillMetabaseWorkersCount > 0 { + eg.SetLimit(s.cfg.refillMetabaseWorkersCount) } var completedCount uint64 @@ -278,7 +280,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, var isIndexedContainer bool if hasIndexedAttribute { - info, err := s.containerInfo.Info(ctx, addr.Container()) + info, err := s.containerInfo.Info(addr.Container()) if err != nil { return err } @@ -363,7 +365,6 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object // Close releases all Shard's components. func (s *Shard) Close(ctx context.Context) error { - unlock := s.lockExclusive() if s.rb != nil { s.rb.Stop(ctx, s.log) } @@ -389,14 +390,6 @@ func (s *Shard) Close(ctx context.Context) error { } } - if s.opsLimiter != nil { - s.opsLimiter.Close() - } - - unlock() - - // GC waits for handlers and remover to complete. 
Handlers may try to lock shard's lock. - // So to prevent deadlock GC stopping is outside of exclusive lock. // If Init/Open was unsuccessful gc can be nil. if s.gc != nil { s.gc.stop(ctx) @@ -452,10 +445,6 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { return err } } - if c.opsLimiter != nil { - s.opsLimiter.Close() - s.opsLimiter = c.opsLimiter - } return s.setMode(ctx, c.info.Mode) } diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go index 8dc1f0522..b3bc6a30b 100644 --- a/pkg/local_object_storage/shard/count.go +++ b/pkg/local_object_storage/shard/count.go @@ -23,12 +23,6 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) { return 0, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() - cc, err := s.metaBase.ObjectCounters() if err != nil { return 0, err diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index 0101817a8..fb6769b51 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -54,12 +55,6 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del return DeleteRes{}, ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return DeleteRes{}, err - } - defer release() - result := DeleteRes{} for _, addr := range prm.addr { select { @@ -117,7 +112,8 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil { s.log.Debug(ctx, logs.StorageIDRetrievalFailure, zap.Stringer("object", addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } storageID := res.StorageID() @@ -136,7 +132,8 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil && !client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, zap.Stringer("object_address", addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } return nil diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 2c11b6b01..82ce48dde 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -53,6 +53,10 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { )) defer span.End() + var exists bool + var locked bool + var err error + s.m.RLock() defer s.m.RUnlock() @@ -60,18 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { return ExistsRes{}, ErrShardDisabled } else if s.info.EvacuationInProgress { return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ExistsRes{}, err - } - defer release() - - var exists bool - var locked bool - - if s.info.Mode.NoMetabase() { + } else if 
s.info.Mode.NoMetabase() { var p common.ExistsPrm p.Address = prm.Address diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index a262a52cb..1b218a372 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -6,13 +6,11 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -33,14 +31,41 @@ type TombstoneSource interface { IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool } -type newEpochHandler func(context.Context, uint64) +// Event represents a class of external events. +type Event interface { + typ() eventType +} -type newEpochHandlers struct { +type eventType int + +const ( + _ eventType = iota + eventNewEpoch +) + +type newEpoch struct { + epoch uint64 +} + +func (e newEpoch) typ() eventType { + return eventNewEpoch +} + +// EventNewEpoch returns a new epoch event. +func EventNewEpoch(e uint64) Event { + return newEpoch{ + epoch: e, + } +} + +type eventHandler func(context.Context, Event) + +type eventHandlers struct { prevGroup sync.WaitGroup cancelFunc context.CancelFunc - handlers []newEpochHandler + handlers []eventHandler } type gcRunResult struct { @@ -82,10 +107,10 @@ type gc struct { remover func(context.Context) gcRunResult - // newEpochChan is used only for listening for the new epoch event. + // eventChan is used only for listening for the new epoch event. // It is ok to keep opened, we are listening for context done when writing in it. 
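The Event/eventType machinery above is a small sealed-event pattern: the unexported typ method keeps other packages from adding event kinds, and handlers are looked up by that type tag. A compact sketch of the dispatch shape (the handler body is hypothetical):

package main

import "fmt"

type eventType int

const (
	_ eventType = iota
	eventNewEpoch
)

// Event carries only an unexported type tag, so event kinds stay sealed
// inside the package; handlers are selected by that tag.
type Event interface{ typ() eventType }

type newEpoch struct{ epoch uint64 }

func (e newEpoch) typ() eventType { return eventNewEpoch }

type eventHandler func(Event)

func main() {
	// Hypothetical handler table in the shape of mEventHandler above.
	handlers := map[eventType][]eventHandler{
		eventNewEpoch: {
			func(e Event) { fmt.Println("collect expired, epoch", e.(newEpoch).epoch) },
		},
	}
	var ev Event = newEpoch{epoch: 105}
	for _, h := range handlers[ev.typ()] {
		h(ev)
	}
}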
- newEpochChan chan uint64 - newEpochHandlers *newEpochHandlers + eventChan chan Event + mEventHandler map[eventType]*eventHandlers } type gcCfg struct { @@ -115,8 +140,16 @@ func defaultGCCfg() gcCfg { } func (gc *gc) init(ctx context.Context) { - gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) + sz := 0 + + for _, v := range gc.mEventHandler { + sz += len(v.handlers) + } + + if sz > 0 { + gc.workerPool = gc.workerPoolInit(sz) + } + gc.wg.Add(2) go gc.tickRemover(ctx) go gc.listenEvents(ctx) @@ -133,7 +166,7 @@ func (gc *gc) listenEvents(ctx context.Context) { case <-ctx.Done(): gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return - case event, ok := <-gc.newEpochChan: + case event, ok := <-gc.eventChan: if !ok { gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return @@ -144,33 +177,38 @@ func (gc *gc) listenEvents(ctx context.Context) { } } -func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { - gc.newEpochHandlers.cancelFunc() - gc.newEpochHandlers.prevGroup.Wait() +func (gc *gc) handleEvent(ctx context.Context, event Event) { + v, ok := gc.mEventHandler[event.typ()] + if !ok { + return + } + + v.cancelFunc() + v.prevGroup.Wait() var runCtx context.Context - runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) + runCtx, v.cancelFunc = context.WithCancel(ctx) - gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) + v.prevGroup.Add(len(v.handlers)) - for i := range gc.newEpochHandlers.handlers { + for i := range v.handlers { select { case <-ctx.Done(): return default: } - h := gc.newEpochHandlers.handlers[i] + h := v.handlers[i] err := gc.workerPool.Submit(func() { - defer gc.newEpochHandlers.prevGroup.Done() - h(runCtx, epoch) + defer v.prevGroup.Done() + h(runCtx, event) }) if err != nil { gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.Error(err), ) - gc.newEpochHandlers.prevGroup.Done() + v.prevGroup.Done() } } } @@ -227,9 +265,6 @@ func (gc *gc) stop(ctx context.Context) { gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() - - gc.newEpochHandlers.cancelFunc() - gc.newEpochHandlers.prevGroup.Wait() } // iterates over metabase and deletes objects @@ -254,7 +289,28 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) - buf, err := s.getGarbage(ctx) + buf := make([]oid.Address, 0, s.rmBatchSize) + + var iterPrm meta.GarbageIterationPrm + iterPrm.SetHandler(func(g meta.GarbageObject) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + buf = append(buf, g.Address()) + + if len(buf) == s.rmBatchSize { + return meta.ErrInterruptIterator + } + + return nil + }) + + // iterate over metabase's objects with GC mark + // (no more than s.rmBatchSize objects) + err := s.metaBase.IterateOverGarbage(ctx, iterPrm) if err != nil { s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, zap.Error(err), @@ -286,46 +342,13 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return } -func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - - buf := make([]oid.Address, 0, s.rmBatchSize) - - var iterPrm meta.GarbageIterationPrm - iterPrm.SetHandler(func(g meta.GarbageObject) error { - select { - case 
<-ctx.Done(): - return ctx.Err() - default: - } - - buf = append(buf, g.Address()) - - if len(buf) == s.rmBatchSize { - return meta.ErrInterruptIterator - } - - return nil - }) - - if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil { - return nil, err - } - - return buf, nil -} - func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) + workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) + batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) return } -func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -333,8 +356,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -343,7 +366,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { batch = append(batch, o.Address()) @@ -391,25 +414,24 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } - s.handleExpiredObjectsUnsafe(ctx, expired) -} - -func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { - select { - case <-ctx.Done(): - return - default: - } - expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) return } - res, err := s.inhumeGC(ctx, expired) + var inhumePrm meta.InhumePrm + + inhumePrm.SetAddresses(expired...) 
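The garbage iteration restored above relies on a sentinel error to stop a metabase scan once a batch is full. A runnable sketch of that interrupt-iterator convention (errInterrupt and iterateGarbage stand in for meta.ErrInterruptIterator and the metabase iterator):

package main

import (
	"errors"
	"fmt"
)

// errInterrupt stands in for meta.ErrInterruptIterator: returning it from
// the handler stops iteration early without reporting a failure.
var errInterrupt = errors.New("interrupt iterator")

func iterateGarbage(addrs []string, handler func(string) error) error {
	for _, a := range addrs {
		if err := handler(a); err != nil {
			if errors.Is(err, errInterrupt) {
				return nil // early stop requested by the handler
			}
			return err
		}
	}
	return nil
}

func main() {
	const rmBatchSize = 2
	buf := make([]string, 0, rmBatchSize)
	err := iterateGarbage([]string{"obj1", "obj2", "obj3"}, func(addr string) error {
		buf = append(buf, addr)
		if len(buf) == rmBatchSize {
			return errInterrupt // batch is full, stop collecting
		}
		return nil
	})
	fmt.Println(buf, err) // [obj1 obj2] <nil>
}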
+ inhumePrm.SetGCMark() + + // inhume the collected objects + res, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err)) + s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, + zap.Error(err), + ) + return } @@ -427,12 +449,6 @@ func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Ad } func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - result := make([]oid.Address, 0, len(source)) parentToChildren, err := s.metaBase.GetChildren(ctx, source) if err != nil { @@ -446,20 +462,7 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) return result, nil } -func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) { - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return meta.InhumeRes{}, err - } - defer release() - - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(addrs...) - inhumePrm.SetGCMark() - return s.metaBase.Inhume(ctx, inhumePrm) -} - -func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -467,6 +470,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) }() + epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) @@ -499,18 +503,11 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { return } - var release qos.ReleaseFunc - release, err = s.opsLimiter.ReadRequest(ctx) - if err != nil { - log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) - s.m.RUnlock() - return - } err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) - release() if err != nil { log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() + return } @@ -538,7 +535,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { } } -func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -546,8 +543,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -557,14 +554,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { if o.Type() == objectSDK.TypeLock { batch = append(batch, o.Address()) if 
len(batch) == batchSize { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, epoch, expired) + s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) return egCtx.Err() }) batch = make([]oid.Address, 0, batchSize) @@ -578,7 +575,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { if len(batch) > 0 { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, epoch, expired) + s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) return egCtx.Err() }) } @@ -599,13 +596,7 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo return ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return err - } - defer release() - - err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { + err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { select { case <-ctx.Done(): return meta.ErrInterruptIterator @@ -621,11 +612,12 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo } func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return nil, ErrDegradedMode } - defer release() return s.metaBase.FilterExpired(ctx, epoch, addresses) } @@ -642,15 +634,12 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston return } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) - return - } res, err := s.metaBase.InhumeTombstones(ctx, tss) - release() if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, + zap.Error(err), + ) + return } @@ -670,22 +659,14 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // HandleExpiredLocks unlocks all objects which were locked by lockers. // If successful, marks lockers themselves as garbage. func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return - } - - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + if s.GetMode().NoMetabase() { return } unlocked, err := s.metaBase.FreeLockedBy(lockers) - release() if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, + zap.Error(err), + ) return } @@ -693,15 +674,13 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] var pInhume meta.InhumePrm pInhume.SetAddresses(lockers...) 
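collectExpiredLocks above hands every full batch to an errgroup goroutine and reallocates the buffer so concurrent handlers never share it. A minimal sketch of that batch-and-dispatch shape, with a hypothetical item type and handler:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	const batchSize = 2
	expired := []string{"a", "b", "c", "d", "e"}

	eg, egCtx := errgroup.WithContext(context.Background())

	var mu sync.Mutex
	var dispatched int

	handle := func(batch []string) error {
		mu.Lock()
		dispatched++
		mu.Unlock()
		return egCtx.Err() // handlers bail out once the group is cancelled
	}

	batch := make([]string, 0, batchSize)
	for _, addr := range expired {
		batch = append(batch, addr)
		if len(batch) == batchSize {
			full := batch // capture before the buffer is reallocated
			eg.Go(func() error { return handle(full) })
			batch = make([]string, 0, batchSize)
		}
	}
	if len(batch) > 0 {
		tail := batch
		eg.Go(func() error { return handle(tail) })
	}

	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(dispatched, "batches dispatched") // 3 batches dispatched
}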
pInhume.SetForceGCMark() - release, err = s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) - return - } + res, err := s.metaBase.Inhume(ctx, pInhume) - release() if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, + zap.Error(err), + ) + return } @@ -731,40 +710,36 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc return } - s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) + s.handleExpiredObjects(ctx, expiredUnlocked) } // HandleDeletedLocks unlocks all objects which were locked by lockers. func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { + if s.GetMode().NoMetabase() { return } - release, err := s.opsLimiter.WriteRequest(ctx) + _, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) - return - } - _, err = s.metaBase.FreeLockedBy(lockers) - release() - if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, + zap.Error(err), + ) + return } } -// NotificationChannel returns channel for new epoch events. -func (s *Shard) NotificationChannel() chan<- uint64 { - return s.gc.newEpochChan +// NotificationChannel returns channel for shard events. +func (s *Shard) NotificationChannel() chan<- Event { + return s.gc.eventChan } -func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") defer span.End() + epoch := e.(newEpoch).epoch + s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) @@ -773,13 +748,7 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { } func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) - return - } ids, err := s.metaBase.ZeroSizeContainers(ctx) - release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return @@ -791,13 +760,7 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui } func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) - return - } ids, err := s.metaBase.ZeroCountContainers(ctx) - release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 54d2f1510..9998bbae2 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -37,8 +37,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { { 
Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), + blobovniczatree.WithLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index f512a488a..e3670b441 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { require.NoError(t, err) epoch.Value = 105 - sh.gc.handleEvent(context.Background(), epoch.Value) + sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) var getPrm GetPrm getPrm.SetAddress(objectCore.AddressOf(obj)) @@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { require.True(t, errors.As(err, &splitInfoError), "split info must be provided") epoch.Value = 105 - sh.gc.handleEvent(context.Background(), epoch.Value) + sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 28f8912be..15d1eb6ba 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -10,6 +10,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -111,12 +112,6 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { return c.Get(ctx, prm.addr) } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return GetRes{}, err - } - defer release() - skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) @@ -160,12 +155,14 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta if client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta)) + zap.Bool("skip_meta", skipMeta), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta)) + zap.Bool("skip_meta", skipMeta), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } if skipMeta || mErr != nil { diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index 34b8290d6..ff57e3bf9 100644 --- a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -81,12 +81,6 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { headParams.SetAddress(prm.addr) headParams.SetRaw(prm.raw) - release, 
limitErr := s.opsLimiter.ReadRequest(ctx) - if limitErr != nil { - return HeadRes{}, limitErr - } - defer release() - var res meta.GetRes res, err = s.metaBase.Get(ctx, headParams) obj = res.Header() diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 7391adef2..26492cf01 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { } shardID := s.info.ID.String() - s.metricsWriter.SetShardID(shardID) + s.cfg.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } @@ -61,7 +61,6 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { if s.pilorama != nil { s.pilorama.SetParentID(s.info.ID.String()) } - s.opsLimiter.SetParentID(s.info.ID.String()) if len(idFromMetabase) == 0 && !modeDegraded { if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index c0fd65f4b..d46400869 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" @@ -81,12 +82,6 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return InhumeRes{}, err - } - defer release() - if s.hasWriteCache() { for i := range prm.target { _ = s.writeCache.Delete(ctx, prm.target[i]) @@ -116,6 +111,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) s.m.RUnlock() diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index af87981ca..c5275dafd 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -106,12 +107,6 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { return SelectRes{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return SelectRes{}, err - } - defer release() - lst, err := s.metaBase.Containers(ctx) if err != nil { return res, fmt.Errorf("list stored containers: %w", err) @@ -129,7 +124,8 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { if err != nil { s.log.Debug(ctx, logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), - zap.Error(err)) + zap.Error(err), + 
zap.String("trace_id", tracingPkg.GetTraceID(ctx))) continue } @@ -151,12 +147,6 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo return ListContainersRes{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ListContainersRes{}, err - } - defer release() - containers, err := s.metaBase.Containers(ctx) if err != nil { return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) @@ -185,12 +175,6 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List return ListWithCursorRes{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return ListWithCursorRes{}, err - } - defer release() - var metaPrm meta.ListPrm metaPrm.SetCount(prm.count) metaPrm.SetCursor(prm.cursor) @@ -220,15 +204,9 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai return ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return err - } - defer release() - var metaPrm meta.IterateOverContainersPrm metaPrm.Handler = prm.Handler - err = s.metaBase.IterateOverContainers(ctx, metaPrm) + err := s.metaBase.IterateOverContainers(ctx, metaPrm) if err != nil { return fmt.Errorf("iterate over containers: %w", err) } @@ -251,17 +229,11 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv return ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return err - } - defer release() - var metaPrm meta.IterateOverObjectsInContainerPrm metaPrm.ContainerID = prm.ContainerID metaPrm.ObjectType = prm.ObjectType metaPrm.Handler = prm.Handler - err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) + err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) if err != nil { return fmt.Errorf("iterate over objects: %w", err) } @@ -281,12 +253,6 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive return 0, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() - var metaPrm meta.CountAliveObjectsInContainerPrm metaPrm.ObjectType = prm.ObjectType metaPrm.ContainerID = prm.ContainerID diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index 9c392fdac..31ca16aa1 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -38,13 +38,7 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked [] return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - - err = s.metaBase.Lock(ctx, idCnr, locker, locked) + err := s.metaBase.Lock(ctx, idCnr, locker, locked) if err != nil { return fmt.Errorf("metabase lock: %w", err) } @@ -67,12 +61,6 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return false, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return false, err - } - defer release() - var prm meta.IsLockedPrm prm.SetAddress(addr) @@ -98,12 +86,5 @@ func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error if m.NoMetabase() { return nil, ErrDegradedMode } - - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() - return s.metaBase.GetLocks(ctx, addr) } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go 
index 3878a65cd..5caf3641f 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -28,10 +28,9 @@ func TestShard_Lock(t *testing.T) { var sh *Shard rootPath := t.TempDir() - l := logger.NewLoggerWrapper(zap.NewNop()) opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(l), + WithLogger(logger.NewLoggerWrapper(zap.NewNop())), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index f8cb00a31..3f23111af 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -67,12 +67,6 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var res common.PutRes - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return PutRes{}, err - } - defer release() - // exist check are not performed there, these checks should be executed // ahead of `Put` by storage engine tryCache := s.hasWriteCache() && !m.NoMetabase() diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index 443689104..701268820 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -131,12 +131,6 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { return obj, nil } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return RngRes{}, err - } - defer release() - skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 06fe9f511..146e834cc 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -79,8 +79,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), + blobovniczatree.WithLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 20f1f2b6f..10eb51a28 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -6,13 +6,10 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -21,9 +18,37 @@ import ( var ErrRebuildInProgress = errors.New("shard rebuild in progress") +type RebuildWorkerLimiter interface { + AcquireWorkSlot(ctx context.Context) error + ReleaseWorkSlot() +} + +type rebuildLimiter struct { + 
semaphore chan struct{} +} + +func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter { + return &rebuildLimiter{ + semaphore: make(chan struct{}, workersCount), + } +} + +func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error { + select { + case l.semaphore <- struct{}{}: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (l *rebuildLimiter) ReleaseWorkSlot() { + <-l.semaphore +} + type rebuildTask struct { - concurrencyLimiter common.RebuildLimiter - fillPercent int + limiter RebuildWorkerLimiter + fillPercent int } type rebuilder struct { @@ -63,14 +88,14 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D if !ok { continue } - runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter) + runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter) } } }() } func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger, - fillPercent int, concLimiter common.RebuildLimiter, + fillPercent int, limiter RebuildWorkerLimiter, ) { select { case <-ctx.Done(): @@ -78,22 +103,21 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo default: } log.Info(ctx, logs.BlobstoreRebuildStarted) - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) - if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { + if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) } } -func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int, +func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int, ) error { select { case <-ctx.Done(): return ctx.Err() case r.tasks <- rebuildTask{ - concurrencyLimiter: limiter, - fillPercent: fillPercent, + limiter: limiter, + fillPercent: fillPercent, }: return nil default: @@ -142,7 +166,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres } type RebuildPrm struct { - ConcurrencyLimiter common.ConcurrencyLimiter + ConcurrencyLimiter RebuildWorkerLimiter TargetFillPercent uint32 } @@ -164,30 +188,5 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error { return ErrDegradedMode } - limiter := &rebuildLimiter{ - concurrencyLimiter: p.ConcurrencyLimiter, - rateLimiter: s.opsLimiter, - } - return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent)) -} - -var _ common.RebuildLimiter = (*rebuildLimiter)(nil) - -type rebuildLimiter struct { - concurrencyLimiter common.ConcurrencyLimiter - rateLimiter qos.Limiter -} - -func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { - return r.concurrencyLimiter.AcquireWorkSlot(ctx) -} - -func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) { - release, err := r.rateLimiter.ReadRequest(ctx) - return common.ReleaseFunc(release), err -} - -func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) { - release, err := r.rateLimiter.WriteRequest(ctx) - return common.ReleaseFunc(release), err + return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent)) } diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index fbc751e26..c7c7e11c2 100644 --- a/pkg/local_object_storage/shard/select.go +++ 
b/pkg/local_object_storage/shard/select.go @@ -60,12 +60,6 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { return SelectRes{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return SelectRes{}, nil - } - defer release() - var selectPrm meta.SelectPrm selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index d89b56266..1eb7f14d0 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -7,7 +7,6 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -99,8 +98,6 @@ type cfg struct { reportErrorFunc func(ctx context.Context, selfID string, message string, err error) containerInfo container.InfoProvider - - opsLimiter qos.Limiter } func defaultCfg() *cfg { @@ -112,7 +109,6 @@ func defaultCfg() *cfg { zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, metricsWriter: noopMetrics{}, - opsLimiter: qos.NewNoopLimiter(), } } @@ -205,7 +201,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option { func WithLogger(l *logger.Logger) Option { return func(c *cfg) { c.log = l - c.gcCfg.log = l.WithTag(logger.TagGC) + c.gcCfg.log = l } } @@ -218,7 +214,7 @@ func WithWriteCache(use bool) Option { // hasWriteCache returns bool if write cache exists on shards. func (s *Shard) hasWriteCache() bool { - return s.useWriteCache + return s.cfg.useWriteCache } // NeedRefillMetabase returns true if metabase is needed to be refilled. @@ -372,22 +368,16 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { } } -func WithLimiter(l qos.Limiter) Option { - return func(c *cfg) { - c.opsLimiter = l - } -} - func (s *Shard) fillInfo() { - s.info.MetaBaseInfo = s.metaBase.DumpInfo() - s.info.BlobStorInfo = s.blobStor.DumpInfo() - s.info.Mode = s.GetMode() + s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() + s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() + s.cfg.info.Mode = s.GetMode() - if s.useWriteCache { - s.info.WriteCacheInfo = s.writeCache.DumpInfo() + if s.cfg.useWriteCache { + s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo() } if s.pilorama != nil { - s.info.PiloramaInfo = s.pilorama.DumpInfo() + s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo() } } @@ -454,57 +444,57 @@ func (s *Shard) updateMetrics(ctx context.Context) { s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic) s.setContainerObjectsCount(contID.EncodeToString(), user, count.User) } - s.metricsWriter.SetMode(s.info.Mode) + s.cfg.metricsWriter.SetMode(s.info.Mode) } // incObjectCounter increment both physical and logical object // counters. 
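The `rebuildLimiter` reintroduced in `rebuild.go` above is a counting semaphore built on a buffered channel: a send acquires a work slot, a receive releases one, and `select` makes acquisition cancellable through the context. A minimal standalone sketch of the same pattern (package and names here are illustrative, not taken from the repository):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// workerLimiter mirrors the reintroduced rebuildLimiter: a buffered channel
// whose capacity is the number of concurrent work slots.
type workerLimiter struct {
	semaphore chan struct{}
}

func newWorkerLimiter(workersCount uint32) *workerLimiter {
	return &workerLimiter{semaphore: make(chan struct{}, workersCount)}
}

// AcquireWorkSlot blocks until a slot frees up or the context is cancelled.
func (l *workerLimiter) AcquireWorkSlot(ctx context.Context) error {
	select {
	case l.semaphore <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// ReleaseWorkSlot frees a previously acquired slot.
func (l *workerLimiter) ReleaseWorkSlot() { <-l.semaphore }

func main() {
	lim := newWorkerLimiter(2) // at most two tasks in flight
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			if err := lim.AcquireWorkSlot(context.Background()); err != nil {
				return // cancelled before a slot became free
			}
			defer lim.ReleaseWorkSlot()
			fmt.Println("task", n, "running")
			time.Sleep(10 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}
```

The channel capacity, fixed at construction, caps the number of concurrent rebuild workers; a blocked `AcquireWorkSlot` unblocks as soon as any worker calls `ReleaseWorkSlot`.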
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - s.metricsWriter.IncObjectCounter(physical) - s.metricsWriter.IncObjectCounter(logical) - s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + s.cfg.metricsWriter.IncObjectCounter(physical) + s.cfg.metricsWriter.IncObjectCounter(logical) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) if isUser { - s.metricsWriter.IncObjectCounter(user) - s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) + s.cfg.metricsWriter.IncObjectCounter(user) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { if v > 0 { - s.metricsWriter.AddToObjectCounter(typ, -int(v)) + s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { if v > 0 { - s.metricsWriter.SetObjectCounter(typ, v) + s.cfg.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { for cnrID, count := range byCnr { if count.Phy > 0 { - s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) + s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) } if count.Logic > 0 { - s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) + s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) } if count.User > 0 { - s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) + s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) } } } func (s *Shard) addToContainerSize(cnr string, size int64) { if size != 0 { - s.metricsWriter.AddToContainerSize(cnr, size) + s.cfg.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { if size != 0 { - s.metricsWriter.AddToPayloadSize(size) + s.cfg.metricsWriter.AddToPayloadSize(size) } } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index 84be71c4d..f9ee34488 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -60,8 +60,7 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), + blobovniczatree.WithLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index db361a8bd..01a014cec 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -43,11 +43,6 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return nil, err - } - defer release() return s.pilorama.TreeMove(ctx, d, treeID, m) } @@ -80,11 +75,6 @@ func (s *Shard) TreeAddByPath(ctx 
context.Context, d pilorama.CIDDescriptor, tre if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return nil, err - } - defer release() return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } @@ -113,11 +103,6 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * if s.info.Mode.NoMetabase() { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } @@ -145,11 +130,6 @@ func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string if s.info.Mode.NoMetabase() { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) } @@ -177,11 +157,6 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } @@ -207,11 +182,6 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return pilorama.Meta{}, 0, err - } - defer release() return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } @@ -237,16 +207,11 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) } // TreeSortedByFilename implements the pilorama.Forest interface. 
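Every `Tree*` method above repeats the same guard being removed: acquire a read or write quota from the ops limiter, `defer` the release, then delegate to pilorama. A hedged sketch of that guard factored into a generic helper; the `limiter` interface shape is inferred from the removed call sites, not taken from the repository:

```go
package main

import (
	"context"
	"fmt"
)

// releaseFunc must be called exactly once after the guarded operation.
type releaseFunc func()

// limiter is the assumed shape of the ops limiter behind the removed guards.
type limiter interface {
	ReadRequest(context.Context) (releaseFunc, error)
	WriteRequest(context.Context) (releaseFunc, error)
}

// withReadQuota factors out the guard each Tree* reader repeated:
// acquire quota, defer the release, then run the real operation.
func withReadQuota[T any](ctx context.Context, l limiter, op func(context.Context) (T, error)) (T, error) {
	var zero T
	release, err := l.ReadRequest(ctx)
	if err != nil {
		return zero, err
	}
	defer release()
	return op(ctx)
}

// noopLimiter grants quota unconditionally, like the qos no-op limiter.
type noopLimiter struct{}

func (noopLimiter) ReadRequest(context.Context) (releaseFunc, error)  { return func() {}, nil }
func (noopLimiter) WriteRequest(context.Context) (releaseFunc, error) { return func() {}, nil }

func main() {
	height, err := withReadQuota(context.Background(), noopLimiter{}, func(context.Context) (uint64, error) {
		return 42, nil // stand-in for a pilorama read
	})
	fmt.Println(height, err)
}
```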
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { +func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), @@ -266,11 +231,6 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID if s.info.Mode.NoMetabase() { return nil, last, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, last, err - } - defer release() return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) } @@ -296,11 +256,6 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return pilorama.Move{}, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return pilorama.Move{}, err - } - defer release() return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) } @@ -325,11 +280,6 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro if s.info.Mode.NoMetabase() { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() return s.pilorama.TreeDrop(ctx, cid, treeID) } @@ -353,11 +303,6 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() return s.pilorama.TreeList(ctx, cid) } @@ -381,11 +326,6 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u if s.pilorama == nil { return 0, ErrPiloramaDisabled } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() return s.pilorama.TreeHeight(ctx, cid, treeID) } @@ -410,11 +350,6 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b if s.info.Mode.NoMetabase() { return false, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return false, err - } - defer release() return s.pilorama.TreeExists(ctx, cid, treeID) } @@ -443,11 +378,6 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre if s.info.Mode.NoMetabase() { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) } @@ -472,11 +402,6 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st if s.info.Mode.NoMetabase() { return 0, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return 0, err - } - defer release() return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) } @@ -498,11 +423,6 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - release, err := s.opsLimiter.ReadRequest(ctx) - if err != nil { - return nil, err - } - defer release() return s.pilorama.TreeListTrees(ctx, prm) } @@ -532,10 +452,5 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin 
if s.info.Mode.NoMetabase() { return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source) } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index 9edb89df8..f655e477a 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -67,12 +67,6 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error return ErrDegradedMode } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal) } @@ -130,13 +124,6 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { close(started) defer cleanup() - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) - return - } - defer release() - s.log.Info(ctx, logs.StartedWritecacheSealAsync) if err := s.writeCache.Seal(ctx, prm); err != nil { s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) @@ -151,11 +138,5 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { return nil } } - release, err := s.opsLimiter.WriteRequest(ctx) - if err != nil { - return err - } - defer release() - return s.writeCache.Seal(ctx, prm) } diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index ee709ea73..e829d013c 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -6,7 +6,6 @@ import ( "sync" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -62,7 +61,6 @@ func New(opts ...Option) Cache { maxCacheSize: defaultMaxCacheSize, metrics: DefaultMetrics(), flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize, - qosLimiter: qos.NewNoopLimiter(), }, } @@ -96,8 +94,7 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error { if err != nil { return metaerr.Wrap(err) } - c.initCounters() - return nil + return metaerr.Wrap(c.initCounters()) } // Init runs necessary services. 
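The write-cache constructor above drops its `qosLimiter: qos.NewNoopLimiter()` default, the same null-object arrangement the shard's `defaultCfg` loses earlier in this diff: the component always holds a usable limiter, so call sites never nil-check, and an option (`WithQoSLimiter`, removed in a later hunk) overrides it. A minimal sketch of this null-object-plus-functional-options arrangement, with invented names:

```go
package main

import "fmt"

// Limiter is the dependency being defaulted; the no-op variant makes the
// zero configuration safe to use without nil checks.
type Limiter interface {
	Acquire() (release func(), err error)
}

type noopLimiter struct{}

func (noopLimiter) Acquire() (func(), error) { return func() {}, nil }

type cfg struct {
	limiter Limiter
}

type Option func(*cfg)

// WithLimiter overrides the no-op default, mirroring the removed option.
func WithLimiter(l Limiter) Option {
	return func(c *cfg) { c.limiter = l }
}

func defaultCfg() *cfg {
	return &cfg{limiter: noopLimiter{}} // null object instead of nil
}

func newComponent(opts ...Option) *cfg {
	c := defaultCfg()
	for _, opt := range opts {
		opt(c)
	}
	return c
}

func main() {
	c := newComponent() // no WithLimiter given: the default still works
	release, err := c.limiter.Acquire()
	fmt.Println("acquired without nil check:", err == nil)
	release()
}
```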
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index 893d27ba2..d9e34ceab 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -6,7 +6,6 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -15,7 +14,6 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" @@ -37,7 +35,6 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String()) fl := newFlushLimiter(c.flushSizeLimit) c.wg.Add(1) go func() { @@ -67,13 +64,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { continue } - release, err := c.qosLimiter.ReadRequest(ctx) - if err != nil { - c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err)) - c.modeMtx.RUnlock() - continue - } - err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { + err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { if err := fl.acquire(oi.DataSize); err != nil { return err } @@ -88,15 +79,11 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { return ctx.Err() } }) - release() if err != nil { c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) } c.modeMtx.RUnlock() - - // counter changed by fstree - c.estimateCacheSize() case <-ctx.Done(): return } @@ -120,12 +107,6 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) { func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) { defer fl.release(objInfo.size) - release, err := c.qosLimiter.WriteRequest(ctx) - if err != nil { - c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err)) - return - } - defer release() res, err := c.fsTree.Get(ctx, common.GetPrm{ Address: objInfo.addr, }) diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go index 0e020b36e..ddc4101be 100644 --- a/pkg/local_object_storage/writecache/limiter.go +++ b/pkg/local_object_storage/writecache/limiter.go @@ -3,8 +3,6 @@ package writecache import ( "errors" "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) var errLimiterClosed = errors.New("acquire failed: limiter closed") @@ -47,11 +45,17 @@ func (l *flushLimiter) release(size uint64) { l.cond.L.Lock() defer l.cond.L.Unlock() - assert.True(l.size >= size, "flushLimiter: invalid size") - l.size -= size + if l.size >= size { + l.size -= size + } else { + panic("flushLimiter: invalid size") + } - assert.True(l.count > 0, "flushLimiter: invalid count") - l.count-- + if l.count > 0 { + l.count-- + } else { + panic("flushLimiter: 
invalid count") + } l.cond.Broadcast() } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index a4f98ad06..f2957fe98 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -3,8 +3,8 @@ package writecache import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "go.uber.org/zap" ) // Option represents write-cache configuration option. @@ -38,14 +38,12 @@ type options struct { disableBackgroundFlush bool // flushSizeLimit is total size of flushing objects. flushSizeLimit uint64 - // qosLimiter used to limit flush RPS. - qosLimiter qos.Limiter } // WithLogger sets logger. func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = log + o.log = log.With(zap.String("component", "WriteCache")) } } @@ -138,9 +136,3 @@ func WithFlushSizeLimit(v uint64) Option { o.flushSizeLimit = v } } - -func WithQoSLimiter(l qos.Limiter) Option { - return func(o *options) { - o.qosLimiter = l - } -} diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index 2fbf50913..7da5c4d3a 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -2,7 +2,6 @@ package writecache import ( "context" - "fmt" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -60,15 +59,7 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro // putBig writes object to FSTree and pushes it to the flush workers queue. func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error { - if prm.RawData == nil { // foolproof: RawData should be marshalled by shard. - data, err := prm.Object.Marshal() - if err != nil { - return fmt.Errorf("cannot marshal object: %w", err) - } - prm.RawData = data - } - size := uint64(len(prm.RawData)) - if !c.hasEnoughSpace(size) { + if !c.hasEnoughSpaceFS() { return ErrOutOfSpace } diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go index 7a52d3672..835686fbb 100644 --- a/pkg/local_object_storage/writecache/state.go +++ b/pkg/local_object_storage/writecache/state.go @@ -7,6 +7,10 @@ func (c *cache) estimateCacheSize() (uint64, uint64) { return count, size } +func (c *cache) hasEnoughSpaceFS() bool { + return c.hasEnoughSpace(c.maxObjectSize) +} + func (c *cache) hasEnoughSpace(objectSize uint64) bool { count, size := c.estimateCacheSize() if c.maxCacheCount > 0 && count+1 > c.maxCacheCount { @@ -15,6 +19,7 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool { return c.maxCacheSize >= size+objectSize } -func (c *cache) initCounters() { +func (c *cache) initCounters() error { c.estimateCacheSize() + return nil } diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index 7ed511318..70b17eb8e 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -52,7 +52,7 @@ type Cache interface { // MainStorage is the interface of the underlying storage of Cache implementations. 
type MainStorage interface { - Compressor() *compression.Compressor + Compressor() *compression.Config Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error) Put(context.Context, common.PutPrm) (common.PutRes, error) } diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index 4462daab4..a5fb8e82a 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -1,7 +1,6 @@ package balance import ( - "context" "fmt" "math/big" @@ -11,14 +10,17 @@ import ( // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. -func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) { - h := id.ScriptHash() +func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { + h, err := id.ScriptHash() + if err != nil { + return nil, err + } invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(balanceOfMethod) invokePrm.SetArgs(h) - prms, err := c.client.TestInvoke(ctx, invokePrm) + prms, err := c.client.TestInvoke(invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err) } else if ln := len(prms); ln != 1 { diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go index 57e61d62b..c2a66dded 100644 --- a/pkg/morph/client/balance/decimals.go +++ b/pkg/morph/client/balance/decimals.go @@ -1,7 +1,6 @@ package balance import ( - "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -9,11 +8,11 @@ import ( // Decimals decimal precision of currency transactions // through the Balance contract call, and returns it. -func (c *Client) Decimals(ctx context.Context) (uint32, error) { +func (c *Client) Decimals() (uint32, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(decimalsMethod) - prms, err := c.client.TestInvoke(ctx, invokePrm) + prms, err := c.client.TestInvoke(invokePrm) if err != nil { return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err) } else if ln := len(prms); ln != 1 { diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 870bed166..52d69dccb 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -22,15 +22,22 @@ type TransferPrm struct { // TransferX transfers p.Amount of GASe-12 from p.From to p.To // with details p.Details through direct smart contract call. 
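`BalanceOf` and `Decimals` above share one validation rule: a read-only contract invocation must leave exactly one item on the result stack, and anything else is an error naming the method. A sketch of that check pulled into a helper; `stackItem` is a stand-in for neo-go's `stackitem.Item`, and `expectSingleItem` is a hypothetical name, not a function from the repository:

```go
package main

import (
	"fmt"
	"math/big"
)

// stackItem is a stand-in for neo-go's stackitem.Item.
type stackItem interface {
	TryInteger() (*big.Int, error)
}

// expectSingleItem is a hypothetical helper capturing the check repeated
// by BalanceOf and Decimals: exactly one item must be on the result stack.
func expectSingleItem(items []stackItem, method string) (stackItem, error) {
	if ln := len(items); ln != 1 {
		return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln)
	}
	return items[0], nil
}

// intItem is a trivial stackItem for the demonstration.
type intItem struct{ v *big.Int }

func (i intItem) TryInteger() (*big.Int, error) { return i.v, nil }

func main() {
	items := []stackItem{intItem{v: big.NewInt(12)}}
	item, err := expectSingleItem(items, "balanceOf")
	if err != nil {
		panic(err)
	}
	balance, _ := item.TryInteger()
	fmt.Println(balance) // 12
}
```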
func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { - from := p.From.ScriptHash() - to := p.To.ScriptHash() + from, err := p.From.ScriptHash() + if err != nil { + return err + } + + to, err := p.To.ScriptHash() + if err != nil { + return err + } prm := client.InvokePrm{} prm.SetMethod(transferXMethod) prm.SetArgs(from, to, p.Amount, p.Details) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(ctx, prm) + _, err = c.client.Invoke(ctx, prm) if err != nil { return fmt.Errorf("invoke method (%s): %w", transferXMethod, err) } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index aab058d27..01fcc98e5 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -9,7 +9,6 @@ import ( "sync/atomic" "time" - nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" @@ -61,9 +60,6 @@ type Client struct { rpcActor *actor.Actor // neo-go RPC actor gasToken *nep17.Token // neo-go GAS token wrapper rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper - nnsHash util.Uint160 // NNS contract hash - - nnsReader *nnsClient.ContractReader // NNS contract wrapper acc *wallet.Account // neo account accAddr util.Uint160 // account's address @@ -98,12 +94,27 @@ type Client struct { type cache struct { m sync.RWMutex + nnsHash *util.Uint160 gKey *keys.PublicKey txHeights *lru.Cache[util.Uint256, uint32] metrics metrics.MorphCacheMetrics } +func (c *cache) nns() *util.Uint160 { + c.m.RLock() + defer c.m.RUnlock() + + return c.nnsHash +} + +func (c *cache) setNNSHash(nnsHash util.Uint160) { + c.m.Lock() + defer c.m.Unlock() + + c.nnsHash = &nnsHash +} + func (c *cache) groupKey() *keys.PublicKey { c.m.RLock() defer c.m.RUnlock() @@ -122,6 +133,7 @@ func (c *cache) invalidate() { c.m.Lock() defer c.m.Unlock() + c.nnsHash = nil c.gKey = nil c.txHeights.Purge() } @@ -151,6 +163,20 @@ func (e *notHaltStateError) Error() string { ) } +// implementation of error interface for FrostFS-specific errors. +type frostfsError struct { + err error +} + +func (e frostfsError) Error() string { + return fmt.Sprintf("frostfs error: %v", e.err) +} + +// wraps FrostFS-specific error into frostfsError. Arg must not be nil. +func wrapFrostFSError(err error) error { + return frostfsError{err} +} + // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. @@ -187,7 +213,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F // If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created. // The default batchSize is 100, the default limit from neo-go. 
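The `frostfsError` type restored in `client.go` above brands any error as FrostFS-specific by prefixing its message. A sketch of the same idea; note that the original defines no `Unwrap`, so the `Unwrap` method below is an addition that keeps `errors.Is` working through the wrapper:

```go
package main

import (
	"errors"
	"fmt"
)

// domainError mirrors frostfsError: it brands an error with a fixed prefix.
type domainError struct {
	err error
}

func (e domainError) Error() string {
	return fmt.Sprintf("frostfs error: %v", e.err)
}

// Unwrap is an addition over the hunk above, which defines none; with it,
// errors.Is and errors.As still see the wrapped cause.
func (e domainError) Unwrap() error { return e.err }

var errNotHalt = errors.New("invocation ended in FAULT state")

func main() {
	wrapped := domainError{err: errNotHalt}
	fmt.Println(wrapped)                        // frostfs error: invocation ended in FAULT state
	fmt.Println(errors.Is(wrapped, errNotHalt)) // true, thanks to Unwrap
}
```

Without `Unwrap`, as in the original, callers can only match the wrapper by string or by type assertion on `frostfsError` itself.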
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error { +func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error { start := time.Now() success := false defer func() { @@ -214,7 +240,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int if err != nil { return err } else if val.State != HaltState { - return &notHaltStateError{state: val.State, exception: val.FaultException} + return wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException}) } arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err) @@ -236,7 +262,10 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int }() // Batch size for TraverseIterator() can restricted on the server-side. - traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems) + traverseBatchSize := batchSize + if invoker.DefaultIteratorResultItems < traverseBatchSize { + traverseBatchSize = invoker.DefaultIteratorResultItems + } for { items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize) if err != nil { @@ -278,7 +307,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) ( } if val.State != HaltState { - return nil, &notHaltStateError{state: val.State, exception: val.FaultException} + return nil, wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException}) } success = true @@ -470,7 +499,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) { // NeoFSAlphabetList returns keys that stored in NeoFS Alphabet role. Main chain // stores alphabet node keys of inner ring there, however the sidechain stores both // alphabet and non alphabet node keys of inner ring. -func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) { +func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -565,7 +594,6 @@ func (c *Client) setActor(act *actor.Actor) { c.rpcActor = act c.gasToken = nep17.New(act, gas.Hash) c.rolemgmt = rolemgmt.New(act) - c.nnsReader = nnsClient.NewReader(act, c.nnsHash) } func (c *Client) GetActor() *actor.Actor { diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index e4dcd0db7..d061747bb 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -145,11 +145,6 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er if cli.client == nil { return nil, ErrNoHealthyEndpoint } - cs, err := cli.client.GetContractStateByID(nnsContractID) - if err != nil { - return nil, fmt.Errorf("resolve nns hash: %w", err) - } - cli.nnsHash = cs.Hash cli.setActor(act) go cli.closeWaiter(ctx) diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index 60fb8ad7c..074a586be 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -1,7 +1,6 @@ package container import ( - "context" "errors" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -14,7 +13,7 @@ import ( // to the specified user of FrostFS system. If idUser is nil, returns the list of all containers. // // If remote RPC does not support neo-go session API, fallback to List() method.
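The `TestInvokeIterator` hunk above replaces the `min` builtin (available since Go 1.21) with an explicit comparison; both clamp the requested batch size to neo-go's server-side limit, which the surrounding comments state defaults to 100. A sketch showing the two forms agree:

```go
package main

import "fmt"

// traverseBatchSize reproduces the explicit form the hunk reverts to.
func traverseBatchSize(batchSize, serverLimit int) int {
	if serverLimit < batchSize {
		return serverLimit
	}
	return batchSize
}

func main() {
	const defaultIteratorResultItems = 100 // neo-go's default, per the comment above
	fmt.Println(traverseBatchSize(250, defaultIteratorResultItems)) // 100
	fmt.Println(min(250, defaultIteratorResultItems))               // 100: the builtin agrees
}
```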
-func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) { +func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { var cidList []cid.ID var err error @@ -22,7 +21,7 @@ func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, e cidList = append(cidList, id) return nil } - if err = c.IterateContainersOf(ctx, idUser, cb); err != nil { + if err = c.IterateContainersOf(idUser, cb); err != nil { return nil, err } return cidList, nil @@ -31,7 +30,7 @@ func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, e // iterateContainers iterates over a list of container identifiers // belonging to the specified user of FrostFS system and executes // `cb` on each element. If idUser is nil, calls it on the list of all containers. -func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error { +func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error) error { var rawID []byte if idUser != nil { rawID = idUser.WalletBytes() @@ -60,7 +59,7 @@ func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb fu cnrHash := c.client.ContractAddress() err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { - return c.iterate(ctx, idUser, cb) + return c.iterate(idUser, cb) } return err diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go index 90bcdd7d5..b86e0ce9c 100644 --- a/pkg/morph/client/container/deletion_info.go +++ b/pkg/morph/client/container/deletion_info.go @@ -1,7 +1,6 @@ package container import ( - "context" "crypto/sha256" "fmt" "strings" @@ -15,27 +14,27 @@ import ( "github.com/mr-tron/base58" ) -func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) { - return DeletionInfo(ctx, (*Client)(x), cnr) +func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) { + return DeletionInfo((*Client)(x), cnr) } type deletionInfo interface { - DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) + DeletionInfo(cid []byte) (*containercore.DelInfo, error) } -func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { +func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.DeletionInfo(ctx, binCnr) + return c.DeletionInfo(binCnr) } -func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) { +func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { prm := client.TestInvokePrm{} prm.SetMethod(deletionInfoMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(ctx, prm) + res, err := c.client.TestInvoke(prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index 8622d2cdd..2ab58bf01 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -1,7 +1,6 @@ package container import ( - "context" "crypto/sha256" "fmt" "strings" @@ -17,8 +16,8 @@ import ( type containerSource Client -func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) { - return Get(ctx, (*Client)(x), cnr) +func (x 
*containerSource) Get(cnr cid.ID) (*containercore.Container, error) { + return Get((*Client)(x), cnr) } // AsContainerSource provides container Source interface @@ -28,15 +27,15 @@ func AsContainerSource(w *Client) containercore.Source { } type getContainer interface { - Get(ctx context.Context, cid []byte) (*containercore.Container, error) + Get(cid []byte) (*containercore.Container, error) } // Get marshals container ID, and passes it to Wrapper's Get method. -func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) { +func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.Get(ctx, binCnr) + return c.Get(binCnr) } // Get reads the container from FrostFS system by binary identifier @@ -44,12 +43,12 @@ func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Contai // // If an empty slice is returned for the requested identifier, // storage.ErrNotFound error is returned. -func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { +func (c *Client) Get(cid []byte) (*containercore.Container, error) { prm := client.TestInvokePrm{} prm.SetMethod(getMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(ctx, prm) + res, err := c.client.TestInvoke(prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index fc63d1beb..78ea8278f 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -1,7 +1,6 @@ package container import ( - "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -16,7 +15,7 @@ import ( // // Iterates through the identifiers of all FrostFS containers if pointer // to user identifier is nil. 
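`containerSource` above is a zero-cost adapter: a named type defined as `Client`, so `(*containerSource)(x)` is a plain pointer conversion that swaps the method set without allocating a wrapper. A sketch of the pattern with stand-in types:

```go
package main

import "fmt"

// client stands in for the morph container Client.
type client struct{ endpoint string }

func (c *client) get(id string) (string, error) {
	return "container " + id + " from " + c.endpoint, nil
}

// containerSource adapts *client by type conversion, as in the hunk above:
// same underlying type and layout, different (narrower) method set.
type containerSource client

func (x *containerSource) Get(id string) (string, error) {
	return (*client)(x).get(id)
}

// Source is the consumer-facing read-only interface.
type Source interface {
	Get(id string) (string, error)
}

// AsContainerSource converts without allocating a wrapper struct.
func AsContainerSource(c *client) Source {
	return (*containerSource)(c)
}

func main() {
	src := AsContainerSource(&client{endpoint: "morph"})
	s, _ := src.Get("abc")
	fmt.Println(s)
}
```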
-func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error { +func (c *Client) iterate(idUser *user.ID, cb func(cid.ID) error) error { var rawID []byte if idUser != nil { @@ -27,7 +26,7 @@ func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) e prm.SetMethod(listMethod) prm.SetArgs(rawID) - res, err := c.client.TestInvoke(ctx, prm) + res, err := c.client.TestInvoke(prm) if err != nil { return fmt.Errorf("test invoke (%s): %w", listMethod, err) } else if ln := len(res); ln != 1 { diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go index 3a789672a..305f3ce09 100644 --- a/pkg/morph/client/frostfsid/subject.go +++ b/pkg/morph/client/frostfsid/subject.go @@ -1,7 +1,6 @@ package frostfsid import ( - "context" "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" @@ -15,12 +14,12 @@ const ( methodGetSubjectExtended = "getSubjectExtended" ) -func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubject) prm.SetArgs(addr) - res, err := c.client.TestInvoke(ctx, prm) + res, err := c.client.TestInvoke(prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err) } @@ -38,12 +37,12 @@ func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidc return subj, nil } -func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { +func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubjectExtended) prm.SetArgs(addr) - res, err := c.client.TestInvoke(ctx, prm) + res, err := c.client.TestInvoke(prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err) } diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index b9e39c25e..708d3b39f 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -2,7 +2,6 @@ package client import ( "context" - "slices" "sort" "time" @@ -100,7 +99,8 @@ mainLoop: case <-t.C: c.switchLock.RLock() - endpointsCopy := slices.Clone(c.endpoints.list) + endpointsCopy := make([]Endpoint, len(c.endpoints.list)) + copy(endpointsCopy, c.endpoints.list) currPriority := c.endpoints.list[c.endpoints.curr].Priority highestPriority := c.endpoints.list[0].Priority diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 3f6aed506..2b87df6f7 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -2,6 +2,7 @@ package netmap import ( "context" + "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -24,45 +25,75 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. -func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, MaxObjectSizeConfig) +func (c *Client) MaxObjectSize() (uint64, error) { + objectSize, err := c.readUInt64Config(MaxObjectSizeConfig) + if err != nil { + return 0, err + } + + return objectSize, nil } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. 
-func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, EpochDurationConfig) +func (c *Client) EpochDuration() (uint64, error) { + epochDuration, err := c.readUInt64Config(EpochDurationConfig) + if err != nil { + return 0, err + } + + return epochDuration, nil } // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. -func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, ContainerFeeConfig) +func (c *Client) ContainerFee() (uint64, error) { + fee, err := c.readUInt64Config(ContainerFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. -func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, ContainerAliasFeeConfig) +func (c *Client) ContainerAliasFee() (uint64, error) { + fee, err := c.readUInt64Config(ContainerAliasFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // HomomorphicHashDisabled returns global configuration value of homomorphic hashing // settings. // // Returns (false, nil) if config key is not found in the contract. -func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { - return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey) +func (c *Client) HomomorphicHashDisabled() (bool, error) { + return c.readBoolConfig(HomomorphicHashingDisabledKey) } // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. -func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, IrCandidateFeeConfig) +func (c *Client) InnerRingCandidateFee() (uint64, error) { + fee, err := c.readUInt64Config(IrCandidateFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. -func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, WithdrawFeeConfig) +func (c *Client) WithdrawFee() (uint64, error) { + fee, err := c.readUInt64Config(WithdrawFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // MaintenanceModeAllowed reads admission of "maintenance" state from the @@ -70,32 +101,34 @@ func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { // that storage nodes are allowed to switch their state to "maintenance". // // By default, maintenance state is disallowed. -func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { - return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig) +func (c *Client) MaintenanceModeAllowed() (bool, error) { + return c.readBoolConfig(MaintenanceModeAllowedConfig) } -func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { - v, err := c.config(ctx, []byte(key)) +func (c *Client) readUInt64Config(key string) (uint64, error) { + v, err := c.config([]byte(key), IntegerAssert) if err != nil { return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) } - bi, err := v.TryInteger() - if err != nil { - return 0, err - } - return bi.Uint64(), nil + // IntegerAssert is guaranteed to return int64 if the error is nil. 
+ return uint64(v.(int64)), nil } // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. -func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { - v, err := c.config(ctx, []byte(key)) +func (c *Client) readBoolConfig(key string) (bool, error) { + v, err := c.config([]byte(key), BoolAssert) if err != nil { + if errors.Is(err, ErrConfigNotFound) { + return false, nil + } + return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } - return v.TryBool() + // BoolAssert is guaranteed to return bool if the error is nil. + return v.(bool), nil } // SetConfigPrm groups parameters of SetConfig operation. @@ -166,12 +199,12 @@ type NetworkConfiguration struct { } // ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain. -func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) { +func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) { var res NetworkConfiguration prm := client.TestInvokePrm{} prm.SetMethod(configListMethod) - items, err := c.client.TestInvoke(ctx, prm) + items, err := c.client.TestInvoke(prm) if err != nil { return res, fmt.Errorf("test invoke (%s): %w", configListMethod, err) @@ -244,16 +277,20 @@ func bytesToBool(val []byte) bool { return false } +// ErrConfigNotFound is returned when the requested key was not found +// in the network config (returned value is `Null`). +var ErrConfigNotFound = errors.New("config value not found") + // config performs the test invoke of get config value // method of FrostFS Netmap contract. // // Returns ErrConfigNotFound if config key is not found in the contract. -func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { +func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) - items, err := c.client.TestInvoke(ctx, prm) + items, err := c.client.TestInvoke(prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", configMethod, err) @@ -264,7 +301,26 @@ func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) configMethod, ln) } - return items[0], nil + if _, ok := items[0].(stackitem.Null); ok { + return nil, ErrConfigNotFound + } + + return assert(items[0]) +} + +// IntegerAssert converts stack item to int64. +func IntegerAssert(item stackitem.Item) (any, error) { + return client.IntFromStackItem(item) +} + +// StringAssert converts stack item to string. +func StringAssert(item stackitem.Item) (any, error) { + return client.StringFromStackItem(item) +} + +// BoolAssert converts stack item to bool. +func BoolAssert(item stackitem.Item) (any, error) { + return client.BoolFromStackItem(item) } // iterateRecords iterates over all config records and passes them to f. diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go index 8561329ec..6d7394998 100644 --- a/pkg/morph/client/netmap/epoch.go +++ b/pkg/morph/client/netmap/epoch.go @@ -1,7 +1,6 @@ package netmap import ( - "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -9,11 +8,11 @@ import ( // Epoch receives number of current FrostFS epoch // through the Netmap contract call. 
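`readBoolConfig` above leans on the `ErrConfigNotFound` sentinel: a key the contract answers with `Null` is not a failure, it simply reads as `false`. A compact sketch of that behaviour; the map-backed `fetch` stands in for the contract call:

```go
package main

import (
	"errors"
	"fmt"
)

// errConfigNotFound mirrors ErrConfigNotFound: the contract answered Null.
var errConfigNotFound = errors.New("config value not found")

// fetch stands in for the contract read performed by config().
func fetch(cfg map[string]bool, key string) (bool, error) {
	v, ok := cfg[key]
	if !ok {
		return false, errConfigNotFound
	}
	return v, nil
}

// readBoolConfig reproduces the hunk's behaviour: an absent key is not an
// error, it simply reads as false; any other failure is wrapped.
func readBoolConfig(cfg map[string]bool, key string) (bool, error) {
	v, err := fetch(cfg, key)
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			return false, nil
		}
		return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
	}
	return v, nil
}

func main() {
	cfg := map[string]bool{"MaintenanceModeAllowed": true}
	v, err := readBoolConfig(cfg, "HomomorphicHashingDisabled")
	fmt.Println(v, err) // false <nil>: missing key defaults to false
}
```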
-func (c *Client) Epoch(ctx context.Context) (uint64, error) { +func (c *Client) Epoch() (uint64, error) { prm := client.TestInvokePrm{} prm.SetMethod(epochMethod) - items, err := c.client.TestInvoke(ctx, prm) + items, err := c.client.TestInvoke(prm) if err != nil { return 0, fmt.Errorf("test invoke (%s): %w", epochMethod, err) @@ -33,11 +32,11 @@ func (c *Client) Epoch(ctx context.Context) (uint64, error) { // LastEpochBlock receives block number of current FrostFS epoch // through the Netmap contract call. -func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) { +func (c *Client) LastEpochBlock() (uint32, error) { prm := client.TestInvokePrm{} prm.SetMethod(lastEpochBlockMethod) - items, err := c.client.TestInvoke(ctx, prm) + items, err := c.client.TestInvoke(prm) if err != nil { return 0, fmt.Errorf("test invoke (%s): %w", lastEpochBlockMethod, err) diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index 0e1f9186b..0cfad4c82 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -40,11 +40,11 @@ func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error { } // GetInnerRingList return current IR list. -func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) { +func (c *Client) GetInnerRingList() (keys.PublicKeys, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(innerRingListMethod) - prms, err := c.client.TestInvoke(ctx, invokePrm) + prms, err := c.client.TestInvoke(invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err) } diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go index 97782fc25..a0009ea73 100644 --- a/pkg/morph/client/netmap/netmap.go +++ b/pkg/morph/client/netmap/netmap.go @@ -1,7 +1,6 @@ package netmap import ( - "context" "fmt" netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" @@ -12,12 +11,12 @@ import ( // GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and // decodes netmap.NetMap from the response. -func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(epochSnapshotMethod) invokePrm.SetArgs(epoch) - res, err := c.client.TestInvoke(ctx, invokePrm) + res, err := c.client.TestInvoke(invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", epochSnapshotMethod, err) @@ -35,11 +34,11 @@ func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.Ne // GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo // from the response. -func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) { +func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapCandidatesMethod) - res, err := c.client.TestInvoke(ctx, invokePrm) + res, err := c.client.TestInvoke(invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err) } @@ -52,11 +51,11 @@ func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) { } // NetMap calls "netmap" method and decode netmap.NetMap from the response. 
-func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) { +func (c *Client) NetMap() (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapMethod) - res, err := c.client.TestInvoke(ctx, invokePrm) + res, err := c.client.TestInvoke(invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", netMapMethod, err) diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go index 9dbec1a90..a5134bcef 100644 --- a/pkg/morph/client/netmap/snapshot.go +++ b/pkg/morph/client/netmap/snapshot.go @@ -1,7 +1,6 @@ package netmap import ( - "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -9,12 +8,12 @@ import ( ) // GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response. -func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) { prm := client.TestInvokePrm{} prm.SetMethod(snapshotMethod) prm.SetArgs(diff) - res, err := c.client.TestInvoke(ctx, prm) + res, err := c.client.TestInvoke(prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err) } diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index bc00eb889..f292dccf1 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -8,12 +8,14 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) const ( @@ -35,8 +37,12 @@ const ( NNSPolicyContractName = "policy.frostfs" ) -// ErrNNSRecordNotFound means that there is no such record in NNS contract. -var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") +var ( + // ErrNNSRecordNotFound means that there is no such record in NNS contract. + ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") + + errEmptyResultStack = errors.New("returned result stack is empty") +) // NNSAlphabetContractName returns contract name of the alphabet contract in NNS // based on alphabet index. @@ -55,36 +61,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - sh, err = nnsResolve(c.nnsReader, name) + nnsHash, err := c.NNSHash() + if err != nil { + return util.Uint160{}, err + } + + sh, err = nnsResolve(c.client, nnsHash, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) } return sh, nil } -func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { - available, err := r.IsAvailable(domain) +// NNSHash returns NNS contract hash. 
+func (c *Client) NNSHash() (util.Uint160, error) { + c.switchLock.RLock() + defer c.switchLock.RUnlock() + + if c.inactive { + return util.Uint160{}, ErrConnectionLost + } + + success := false + startedAt := time.Now() + + defer func() { + c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt)) + }() + + nnsHash := c.cache.nns() + + if nnsHash == nil { + cs, err := c.client.GetContractStateByID(nnsContractID) + if err != nil { + return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err) + } + + c.cache.setNNSHash(cs.Hash) + nnsHash = &cs.Hash + } + success = true + return *nnsHash, nil +} + +func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { + found, err := exists(c, nnsHash, domain) if err != nil { return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } - if available { + if !found { return nil, ErrNNSRecordNotFound } - return r.Resolve(domain, big.NewInt(int64(nns.TXT))) + result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{ + { + Type: smartcontract.StringType, + Value: domain, + }, + { + Type: smartcontract.IntegerType, + Value: big.NewInt(int64(nns.TXT)), + }, + }, nil) + if err != nil { + return nil, err + } + if result.State != vmstate.Halt.String() { + return nil, fmt.Errorf("invocation failed: %s", result.FaultException) + } + if len(result.Stack) == 0 { + return nil, errEmptyResultStack + } + return result.Stack[0], nil } -func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { - arr, err := nnsResolveItem(r, domain) +func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) { + res, err := nnsResolveItem(c, nnsHash, domain) if err != nil { return util.Uint160{}, err } - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") + // Parse the result of resolving NNS record. + // It works with multiple formats (corresponding to multiple NNS versions). + // If array of hashes is provided, it returns only the first one. + if arr, ok := res.Value().([]stackitem.Item); ok { + if len(arr) == 0 { + return util.Uint160{}, errors.New("NNS record is missing") + } + res = arr[0] } - bs, err := arr[0].TryBytes() + bs, err := res.TryBytes() if err != nil { return util.Uint160{}, fmt.Errorf("malformed response: %w", err) } @@ -104,6 +171,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error return util.Uint160{}, errors.New("no valid hashes are found") } +func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) { + result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{ + { + Type: smartcontract.StringType, + Value: domain, + }, + }, nil) + if err != nil { + return false, err + } + + if len(result.Stack) == 0 { + return false, errEmptyResultStack + } + + res := result.Stack[0] + + available, err := res.TryBool() + if err != nil { + return false, fmt.Errorf("malformed response: %w", err) + } + + // not available means that it is taken + // and, therefore, exists + return !available, nil +} + // SetGroupSignerScope makes the default signer scope include all FrostFS contracts. // Should be called for side-chain client only. 
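`NNSHash` above resolves the NNS contract hash lazily, stores it in the mutex-guarded cache, and relies on `invalidate()` to force re-resolution after reconnects. A sketch of the same lazy-caching shape, with a `uint64` standing in for `util.Uint160`; as in the original, two concurrent first calls may both resolve, which is harmless because both store the same value:

```go
package main

import (
	"fmt"
	"sync"
)

// nnsCache mirrors the cache above: the contract hash is resolved lazily
// on first use, stored under a mutex, and dropped on invalidate().
type nnsCache struct {
	m    sync.RWMutex
	hash *uint64 // stand-in for util.Uint160
}

func (c *nnsCache) get() *uint64 {
	c.m.RLock()
	defer c.m.RUnlock()
	return c.hash
}

func (c *nnsCache) set(h uint64) {
	c.m.Lock()
	defer c.m.Unlock()
	c.hash = &h
}

func (c *nnsCache) invalidate() {
	c.m.Lock()
	defer c.m.Unlock()
	c.hash = nil
}

// resolveHash stands in for GetContractStateByID(nnsContractID).
func resolveHash() (uint64, error) { return 0x4e4e53, nil }

// lookup follows NNSHash above: serve from cache, else resolve and store.
func lookup(c *nnsCache) (uint64, error) {
	if h := c.get(); h != nil {
		return *h, nil
	}
	h, err := resolveHash()
	if err != nil {
		return 0, err
	}
	c.set(h)
	return h, nil
}

func main() {
	var c nnsCache
	h, _ := lookup(&c) // resolves and caches
	fmt.Printf("%#x\n", h)
	c.invalidate() // the next lookup resolves again
}
```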
func (c *Client) SetGroupSignerScope() error { @@ -147,12 +241,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) + nnsHash, err := c.NNSHash() if err != nil { return nil, err } - if len(arr) == 0 { + item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) + if err != nil { + return nil, err + } + + arr, ok := item.Value().([]stackitem.Item) + if !ok || len(arr) == 0 { return nil, errors.New("NNS record is missing") } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 448702613..dbd58a53a 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -38,7 +38,8 @@ type ( alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - proxy util.Uint160 + notary util.Uint160 + proxy util.Uint160 } notaryCfg struct { @@ -101,6 +102,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, alphabetSource: cfg.alphabetSource, + notary: notary.Hash, } c.notary = notaryCfg @@ -186,7 +188,7 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, - notary.Hash, + c.notary.notary, big.NewInt(int64(amount)), []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { @@ -461,7 +463,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { if r.State != vmstate.Halt.String() { - return &notHaltStateError{state: r.State, exception: r.FaultException} + return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException}) } t.ValidUntilBlock = until @@ -608,7 +610,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) err := multisigAccount.ConvertMultisig(m, ir) if err != nil { - return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) + // wrap error as FrostFS-specific since the call is not related to any client + return nil, wrapFrostFSError(fmt.Errorf("convert account to inner ring multisig wallet: %w", err)) } } else { // alphabet multisig redeem script is @@ -616,7 +619,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB // inner ring multiaddress witness multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { - return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) + // wrap error as FrostFS-specific since the call is not related to any client + return nil, wrapFrostFSError(fmt.Errorf("make inner ring multisig wallet: %w", err)) } } diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index c4eb120d2..21adebd9e 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -206,9 +205,7 @@ func (ti *TestInvokePrm) SetArgs(args ...any) { } // TestInvoke calls TestInvoke method of Client with static internal script hash.
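The `static.go` hunk that follows strips a tracing span from `StaticClient.TestInvoke`; the removed code named the span after the invoked contract method, so each read-only call showed up individually in traces. A sketch of that instrumentation using the plain OpenTelemetry API rather than the repository's `frostfs-observability` wrapper (tracer name, function, and body are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

// testInvoke sketches the instrumented variant being removed: a span is
// opened per call, named after the contract method, and closed on return.
func testInvoke(ctx context.Context, method string) ([]string, error) {
	_, span := otel.Tracer("morph").Start(ctx, "Morph.TestInvoke."+method)
	defer span.End()

	// ... perform the actual read-only contract invocation here ...
	return []string{"stack item"}, nil
}

func main() {
	items, err := testInvoke(context.Background(), "balanceOf")
	fmt.Println(items, err)
}
```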
-func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) {
-	_, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method)
-	defer span.End()
+func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) {
 	return s.client.TestInvoke(
 		s.scScriptHash,
 		prm.method,
diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go
index f7b6705a8..f68d39beb 100644
--- a/pkg/morph/client/util.go
+++ b/pkg/morph/client/util.go
@@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) {
 func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error {
 	return func(r *result.Invoke, t *transaction.Transaction) error {
 		if r.State != HaltState {
-			return &notHaltStateError{state: r.State, exception: r.FaultException}
+			return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException})
 		}
 
 		t.SystemFee += add
diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go
index 87fcf84b8..962ec1bc2 100644
--- a/pkg/morph/client/waiter.go
+++ b/pkg/morph/client/waiter.go
@@ -33,13 +33,13 @@ func (w *waiterClient) GetVersion() (*result.Version, error) {
 
 // WaitTxHalt waits until transaction with the specified hash persists on the blockchain.
 // It also checks execution result to finish in HALT state.
-func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error {
+func (c *Client) WaitTxHalt(ctx context.Context, p InvokeRes) error {
 	w, err := waiter.NewPollingBased(&waiterClient{c: c})
 	if err != nil {
 		return fmt.Errorf("create tx waiter: %w", err)
 	}
 
-	res, err := w.WaitAny(ctx, vub, h)
+	res, err := w.WaitAny(ctx, p.VUB, p.Hash)
 	if err != nil {
 		return fmt.Errorf("wait until tx persists: %w", err)
 	}
@@ -47,5 +47,5 @@ func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) err
 	if res.VMState.HasFlag(vmstate.Halt) {
 		return nil
 	}
-	return &notHaltStateError{state: res.VMState.String(), exception: res.FaultException}
+	return wrapFrostFSError(&notHaltStateError{state: res.VMState.String(), exception: res.FaultException})
 }
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index e5cdfeef7..83f8bee07 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -134,8 +134,11 @@ func (l *listener) Listen(ctx context.Context) {
 	l.startOnce.Do(func() {
 		l.wg.Add(1)
 		defer l.wg.Done()
-
-		l.listen(ctx, nil)
+		if err := l.listen(ctx, nil); err != nil {
+			l.log.Error(ctx, logs.EventCouldNotStartListenToEvents,
+				zap.Error(err),
+			)
+		}
 	})
 }
 
@@ -149,17 +152,23 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
 	l.startOnce.Do(func() {
 		l.wg.Add(1)
 		defer l.wg.Done()
-
-		l.listen(ctx, intError)
+		if err := l.listen(ctx, intError); err != nil {
+			l.log.Error(ctx, logs.EventCouldNotStartListenToEvents,
+				zap.Error(err),
+			)
+			l.sendError(ctx, intError, err)
+		}
 	})
 }
 
-func (l *listener) listen(ctx context.Context, intError chan<- error) {
+func (l *listener) listen(ctx context.Context, intError chan<- error) error {
 	subErrCh := make(chan error)
 
 	go l.subscribe(subErrCh)
 
 	l.listenLoop(ctx, intError, subErrCh)
+
+	return nil
 }
 
 func (l *listener) subscribe(errCh chan error) {
diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go
index 39c8f6237..4dcc0d035 100644
--- a/pkg/morph/event/netmap/epoch.go
+++ b/pkg/morph/event/netmap/epoch.go
@@ -41,7 +41,7 @@ func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
 	}
 
 	return NewEpoch{
-		Num:  nee.Epoch.Uint64(),
+		Num:  uint64(nee.Epoch.Uint64()),
 		Hash: e.Container,
 	}, nil
 }
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index b11973646..40f5984a9 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -199,8 +199,8 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
 	// neo-go API)
 	//
 	// this check prevents notary flow recursion
-	if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 &&
-		!bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version
+	if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 ||
+		bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version
 		return ErrTXAlreadyHandled
 	}
 
@@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu
 	// the last one must be a placeholder for notary contract witness
 	last := len(w) - 1
 
-	if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981
-		!bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
+	if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981
+		bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version
 		len(w[last].VerificationScript) != 0 {
 		return errIncorrectNotaryPlaceholder
 	}
 
diff --git a/pkg/network/address.go b/pkg/network/address.go
index 4643eef15..cb83a813d 100644
--- a/pkg/network/address.go
+++ b/pkg/network/address.go
@@ -2,11 +2,11 @@ package network
 
 import (
 	"errors"
+	"fmt"
 	"net"
 	"net/url"
 	"strings"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
 	"github.com/multiformats/go-multiaddr"
 	manet "github.com/multiformats/go-multiaddr/net"
@@ -44,9 +44,11 @@ func (a Address) equal(addr Address) bool {
 // See also FromString.
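In the next hunk Address.URIAddr goes back to an explicit panic instead of the assert helper; both forms encode the same invariant, namely that an Address is only ever built via FromString, so manet.DialArgs cannot fail on it. A short usage sketch under that assumption (the endpoint string is illustrative):

	var a network.Address
	if err := a.FromString("/dns4/st1.storage.example/tcp/8080"); err != nil {
		// Malformed endpoints are rejected here, which is what allows
		// URIAddr to treat a later DialArgs failure as unreachable.
		return err
	}
	uri := a.URIAddr() // yields "st1.storage.example:8080"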
func (a Address) URIAddr() string { _, host, err := manet.DialArgs(a.ma) - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - assert.NoError(err, "could not get host addr") + if err != nil { + // the only correct way to construct Address is AddressFromString + // which makes this error appear unexpected + panic(fmt.Errorf("could not get host addr: %w", err)) + } if !a.IsTLSEnabled() { return host diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 54c1e18fb..1bcb83259 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -7,12 +7,10 @@ import ( "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -64,16 +62,12 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpcOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( - qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( - qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), - tagging.NewStreamClientInterceptor(), ), grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), diff --git a/pkg/network/group.go b/pkg/network/group.go index 0044fb2d4..9843b14d4 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,8 +3,6 @@ package network import ( "errors" "fmt" - "iter" - "slices" "sort" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -69,8 +67,9 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // Addresses must return an iterator over network addresses. - Addresses() iter.Seq[string] + // IterateAddresses must iterate over network addresses and pass each one + // to the handler until it returns true. + IterateAddresses(func(string) bool) // NumberOfAddresses must return number of addresses in group. NumberOfAddresses() int @@ -131,19 +130,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error { // iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f // until 1st parsing failure or f's error. func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) { - for s := range iter.Addresses() { + iter.IterateAddresses(func(s string) bool { var a Address err = a.FromString(s) if err != nil { - return fmt.Errorf("could not parse address from string: %w", err) + err = fmt.Errorf("could not parse address from string: %w", err) + return true } err = f(a) - if err != nil { - return err - } - } + + return err != nil + }) return } @@ -165,8 +164,10 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) { // at least one common address. 
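The iterator change above and the Intersects rewrite below serve the same purpose: retiring Go 1.23-era helpers (iter.Seq, slices.ContainsFunc) in favour of callbacks and explicit loops. One subtlety of the restored interface is that the handler's return value means "stop now". A minimal conforming implementation over a plain slice, assuming nothing beyond the interface as shown:

	type sliceIterator []string

	// IterateAddresses feeds each address to f and stops early as soon as
	// f reports true, per the MultiAddressIterator contract above.
	func (s sliceIterator) IterateAddresses(f func(string) bool) {
		for _, addr := range s {
			if f(addr) {
				return
			}
		}
	}

	func (s sliceIterator) NumberOfAddresses() int { return len(s) }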
func (x AddressGroup) Intersects(x2 AddressGroup) bool { for i := range x { - if slices.ContainsFunc(x2, x[i].equal) { - return true + for j := range x2 { + if x[i].equal(x2[j]) { + return true + } } } diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go index d08264533..5b335fa52 100644 --- a/pkg/network/group_test.go +++ b/pkg/network/group_test.go @@ -1,8 +1,6 @@ package network import ( - "iter" - "slices" "sort" "testing" @@ -60,8 +58,10 @@ func TestAddressGroup_FromIterator(t *testing.T) { type testIterator []string -func (t testIterator) Addresses() iter.Seq[string] { - return slices.Values(t) +func (t testIterator) IterateAddresses(f func(string) bool) { + for i := range t { + f(t[i]) + } } func (t testIterator) NumberOfAddresses() int { diff --git a/pkg/network/validation.go b/pkg/network/validation.go index b5157f28f..92f650119 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -2,7 +2,6 @@ package network import ( "errors" - "iter" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -35,8 +34,8 @@ var ( // MultiAddressIterator. type NodeEndpointsIterator netmap.NodeInfo -func (x NodeEndpointsIterator) Addresses() iter.Seq[string] { - return (netmap.NodeInfo)(x).NetworkEndpoints() +func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { + (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) } func (x NodeEndpointsIterator) NumberOfAddresses() int { diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go index 6c2df8428..b77d3e3e6 100644 --- a/pkg/services/accounting/morph/executor.go +++ b/pkg/services/accounting/morph/executor.go @@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor { } } -func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { +func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing account") @@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceReq return nil, fmt.Errorf("invalid account: %w", err) } - amount, err := s.client.BalanceOf(ctx, id) + amount, err := s.client.BalanceOf(id) if err != nil { return nil, err } - balancePrecision, err := s.client.Decimals(ctx) + balancePrecision, err := s.client.Decimals() if err != nil { return nil, err } diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go index 1d485321c..e64f9a8d1 100644 --- a/pkg/services/apemanager/errors/errors.go +++ b/pkg/services/apemanager/errors/errors.go @@ -9,9 +9,3 @@ func ErrAPEManagerAccessDenied(reason string) error { err.WriteReason(reason) return err } - -func ErrAPEManagerInvalidArgument(msg string) error { - err := new(apistatus.InvalidArgument) - err.SetMessage(msg) - return err -} diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index fc08fe569..cc792e23d 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -22,7 +22,6 @@ import ( policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/mr-tron/base58/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" "go.uber.org/zap" ) @@ -35,8 +34,6 @@ type cfg struct { type Service struct { cfg - waiter Waiter - cnrSrc containercore.Source contractStorage 
ape_contract.ProxyAdaptedContractStorage @@ -44,17 +41,11 @@ type Service struct { type Option func(*cfg) -type Waiter interface { - WaitTxHalt(context.Context, uint32, util.Uint256) error -} - -func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service { +func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service { s := &Service{ cnrSrc: cnrSrc, contractStorage: contractStorage, - - waiter: waiter, } for i := range opts { @@ -78,12 +69,12 @@ var _ Server = (*Service)(nil) // validateContainerTargetRequest validates request for the container target. // It checks if request actor is the owner of the container, otherwise it denies the request. -func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { +func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error { var cidSDK cidSDK.ID if err := cidSDK.DecodeString(cid); err != nil { - return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err)) + return fmt.Errorf("invalid CID format: %w", err) } - isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) + isOwner, err := s.isActorContainerOwner(cidSDK, pubKey) if err != nil { return fmt.Errorf("failed to check owner: %w", err) } @@ -93,7 +84,7 @@ func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string return nil } -func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { +func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -101,7 +92,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw()) if err != nil { - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error()) + return nil, err } if len(chain.ID) == 0 { const randomIDLength = 10 @@ -117,19 +108,15 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) + return nil, fmt.Errorf("unsupported target type: %s", targetType) } - txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) - if err != nil { - return nil, err - } - if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { + if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil { return nil, err } @@ -142,7 +129,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques return resp, nil } -func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { +func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) 
(*apemanagerV2.RemoveChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -153,19 +140,15 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) + return nil, fmt.Errorf("unsupported target type: %s", targetType) } - txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) - if err != nil { - return nil, err - } - if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { + if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil { return nil, err } @@ -177,7 +160,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain return resp, nil } -func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { +func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -188,12 +171,12 @@ func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRe switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) + return nil, fmt.Errorf("unsupported target type: %s", targetType) } chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target) @@ -227,23 +210,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK } sig := vh.GetBodySignature() if sig == nil { - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error()) + return nil, errEmptyBodySignature } key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) if err != nil { - return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err)) + return nil, fmt.Errorf("invalid signature key: %w", err) } return key, nil } -func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { +func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - cnr, err := s.cnrSrc.Get(ctx, cid) + cnr, err := s.cnrSrc.Get(cid) if err != nil { return false, fmt.Errorf("get container error: %w", err) } diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index eb6263320..86021c3db 100644 --- 
a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -1,7 +1,6 @@ package ape import ( - "context" "crypto/ecdsa" "errors" "fmt" @@ -20,6 +19,7 @@ import ( ) var ( + errInvalidTargetType = errors.New("bearer token defines non-container target override") errBearerExpired = errors.New("bearer token has expired") errBearerInvalidSignature = errors.New("bearer token has invalid signature") errBearerInvalidContainerID = errors.New("bearer token was created for another container") @@ -48,7 +48,7 @@ type CheckPrm struct { // CheckCore provides methods to perform the common logic of APE check. type CheckCore interface { // CheckAPE performs the common policy-engine check logic on a prepared request. - CheckAPE(ctx context.Context, prm CheckPrm) error + CheckAPE(prm CheckPrm) error } type checkerCoreImpl struct { @@ -70,30 +70,22 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora } // CheckAPE performs the common policy-engine check logic on a prepared request. -func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { +func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { var cr policyengine.ChainRouter - if prm.BearerToken != nil { + if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { var err error if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil { return fmt.Errorf("bearer validation error: %w", err) } - if prm.BearerToken.Impersonate() { - cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) - } else { - override, isSet := prm.BearerToken.APEOverride() - if !isSet { - return errors.New("expected for override within bearer") - } - cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) - } + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) + if err != nil { + return fmt.Errorf("create chain router error: %w", err) } } else { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) } - groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey) + groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -133,19 +125,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe } // Check for ape overrides defined in the bearer token. - if apeOverride, isSet := token.APEOverride(); isSet { - switch apeOverride.Target.TargetType { - case ape.TargetTypeContainer: - var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !cntID.Equals(targetCnr) { - return errBearerInvalidContainerID - } - default: - } + apeOverride := token.APEOverride() + if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { + return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) + } + + // Then check if container is either empty or equal to the container in the request. 
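+	// (Annotation, not part of the original change: in this older bearer
+	// API, APEOverride returns the override unconditionally. Despite the
+	// comment above, an empty Target.Name does not pass: DecodeString
+	// rejects it, so only a target equal to the request container passes.)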
+ var targetCnr cid.ID + err := targetCnr.DecodeString(apeOverride.Target.Name) + if err != nil { + return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) + } + if !cntID.Equals(targetCnr) { + return errBearerInvalidContainerID } // Then check if container owner signed this token. @@ -157,16 +149,8 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe var usrSender user.ID user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's - // public key, but not the actual sender. - if !token.Impersonate() { - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner - } - } else { - if !bearer.ResolveIssuer(*token).Equals(usrSender) { - return errBearerInvalidOwner - } + if !token.AssertUser(usrSender) { + return errBearerInvalidOwner } return nil diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 3b5dab9aa..493452fa6 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -49,11 +49,11 @@ var ( ) type ir interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) + InnerRingKeys() ([][]byte, error) } type containers interface { - Get(context.Context, cid.ID) (*containercore.Container, error) + Get(cid.ID) (*containercore.Container, error) } type apeChecker struct { @@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) if err != nil { return nil, err } @@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co } } - namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) + namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) if err != nil { return nil, fmt.Errorf("could not get owner namespace: %w", err) } - if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { + if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { return nil, err } @@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co reqProps, ) - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -179,7 +179,7 @@ func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return err } @@ -189,7 +189,7 @@ func (ac *apeChecker) ListStream(req 
*container.ListStreamRequest, stream ListSt nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) if err != nil { return err } @@ -199,11 +199,11 @@ func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt } } - namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) + namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) if err != nil { return fmt.Errorf("could not get owner namespace: %w", err) } - if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { + if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { return err } @@ -216,7 +216,7 @@ func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt reqProps, ) - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ac.frostFSIDClient, pk) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -252,7 +252,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -262,7 +262,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) if err != nil { return nil, err } @@ -272,7 +272,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont } } - namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID()) + namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID()) if err != nil { return nil, fmt.Errorf("get namespace error: %w", err) } @@ -280,21 +280,16 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, err } - cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer()) - if err != nil { - return nil, fmt.Errorf("get container properties: %w", err) - } - request := aperequest.NewRequest( nativeschema.MethodPutContainer, aperequest.NewResource( resourceName(namespace, ""), - cnrProps, + make(map[string]string), ), reqProps, ) - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -326,7 +321,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, apeErr(nativeschema.MethodPutContainer, s) } -func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { +func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { if vh == nil { return "", nil, errMissingVerificationHeader } @@ -349,7 +344,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.O } pkBytes := 
pk.Bytes() - isIR, err := ac.isInnerRingKey(ctx, pkBytes) + isIR, err := ac.isInnerRingKey(pkBytes) if err != nil { return "", nil, err } @@ -370,7 +365,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con return err } - cont, err := ac.reader.Get(ctx, id) + cont, err := ac.reader.Get(id) if err != nil { return err } @@ -386,7 +381,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con namespace = cntNamespace } - groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ac.frostFSIDClient, pk) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -400,7 +395,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con op, aperequest.NewResource( resourceName(namespace, id.EncodeToString()), - getContainerProps(cont), + ac.getContainerProps(cont), ), reqProps, ) @@ -450,26 +445,10 @@ func resourceName(namespace string, container string) string { return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container) } -func getContainerProps(c *containercore.Container) map[string]string { - props := map[string]string{ +func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string { + return map[string]string{ nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(), } - for attrName, attrVal := range c.Value.Attributes() { - name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName) - props[name] = attrVal - } - return props -} - -func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) { - if cnrV2 == nil { - return nil, errors.New("container is not set") - } - c := cnrSDK.Container{} - if err := c.ReadFromV2(*cnrV2); err != nil { - return nil, err - } - return getContainerProps(&containercore.Container{Value: c}), nil } func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, @@ -479,7 +458,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe if err != nil { return nil, nil, err } - role, err := ac.getRole(ctx, actor, pk, cont, cnrID) + role, err := ac.getRole(actor, pk, cont, cnrID) if err != nil { return nil, nil, err } @@ -487,7 +466,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) if err != nil { return nil, nil, err } @@ -499,13 +478,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe return reqProps, pk, nil } -func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { +func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { if cont.Value.Owner().Equals(*actor) { return nativeschema.PropertyValueContainerRoleOwner, nil } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(ctx, pkBytes) + isIR, err := ac.isInnerRingKey(pkBytes) if err != nil { return "", err } @@ -513,7 +492,7 @@ func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.Publ return nativeschema.PropertyValueContainerRoleIR, nil } - isContainer, err := ac.isContainerKey(ctx, 
pkBytes, cnrID, cont) + isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont) if err != nil { return "", err } @@ -607,8 +586,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) { - innerRingKeys, err := ac.ir.InnerRingKeys(ctx) +func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { + innerRingKeys, err := ac.ir.InnerRingKeys() if err != nil { return false, err } @@ -622,11 +601,11 @@ func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, erro return false, nil } -func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { +func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { binCnrID := make([]byte, sha256.Size) cnrID.Encode(binCnrID) - nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm) + nm, err := netmap.GetLatestNetworkMap(ac.nm) if err != nil { return false, err } @@ -637,7 +616,7 @@ func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.I // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm) + nm, err = netmap.GetPreviousNetworkMap(ac.nm) if err != nil { return false, err } @@ -662,7 +641,7 @@ func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containerc return false } -func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -670,19 +649,24 @@ func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr := ownerSDK.ScriptHash() + addr, err := ownerSDK.ScriptHash() + if err != nil { + return "", err + } namespace := "" - subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) + subject, err := ac.frostFSIDClient.GetSubject(addr) if err == nil { namespace = subject.Namespace - } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return "", fmt.Errorf("get subject error: %w", err) + } else { + if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { + return "", fmt.Errorf("get subject error: %w", err) + } } return namespace, nil } -func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -690,8 +674,11 @@ func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.Own if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr := ownerSDK.ScriptHash() - subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) + addr, err := ownerSDK.ScriptHash() + if err != nil { + return "", err + } + subject, err := ac.frostFSIDClient.GetSubject(addr) if err != nil { return "", fmt.Errorf("get subject error: %w", err) } @@ -725,12 +712,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro // validateNamespace validates if a namespace of a request actor equals to owner's namespace. // An actor's namespace is calculated by a public key. 
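The edits across this file are mechanical: context parameters are dropped from the apeChecker helpers and their dependencies, and user.ID.ScriptHash reverts to its older two-value signature. A hedged sketch of the call-site shape the revert reinstates (identifiers are illustrative):

	var owner user.ID
	// ... owner decoded from the request ...
	addr, err := owner.ScriptHash() // older SDK signature: (util.Uint160, error)
	if err != nil {
		return "", err
	}
	subject, err := frostFSIDClient.GetSubject(addr) // context-free variant
	if err != nil {
		return "", fmt.Errorf("get subject error: %w", err)
	}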
-func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error { +func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID) + actorNamespace, err := ac.namespaceByOwner(actorOwnerID) if err != nil { return fmt.Errorf("could not get actor namespace: %w", err) } @@ -741,11 +728,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { +func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index 6438c34ca..513ffff02 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -54,8 +54,6 @@ func TestAPE(t *testing.T) { t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace) t.Run("deny list containers for owner with PK", testDenyListContainersForPK) t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError) - t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr) - t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr) } const ( @@ -566,185 +564,6 @@ func testDenyGetContainerByIP(t *testing.T) { require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) } -func testDenyGetContainerSysZoneAttr(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - }, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - pk.PublicKey().GetScriptHash(): { - KV: map[string]string{ - "tag-attr1": "value1", - "tag-attr2": "value2", - }, - Groups: []*client.Group{ - { - ID: 19888, - }, - }, - }, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - testContainer.SetAttribute(container.SysAttributeZone, "eggplant") - contRdr.c[contID] = &containercore.Container{Value: testContainer} - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - 
testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodGetContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindResource, - Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), - Value: "eggplant", - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := &container.GetRequest{} - req.SetBody(&container.GetRequestBody{}) - var refContID refs.ContainerID - contID.WriteToV2(&refContID) - req.GetBody().SetContainerID(&refContID) - - require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) - - resp, err := apeSrv.Get(ctxWithPeerInfo(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) -} - -func testDenyPutContainerSysZoneAttr(t *testing.T) { - t.Parallel() - srv := &srvStub{ - calls: map[string]int{}, - } - router := inmemory.NewInMemory() - contRdr := &containerStub{ - c: map[cid.ID]*containercore.Container{}, - } - ir := &irStub{ - keys: [][]byte{}, - } - nm := &netmapStub{} - - contID := cidtest.ID() - testContainer := containertest.Container() - pp := netmap.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) - testContainer.SetPlacementPolicy(pp) - testContainer.SetAttribute(container.SysAttributeZone, "eggplant") - contRdr.c[contID] = &containercore.Container{Value: testContainer} - owner := testContainer.Owner() - ownerAddr := owner.ScriptHash() - - frostfsIDSubjectReader := &frostfsidStub{ - subjects: map[util.Uint160]*client.Subject{ - ownerAddr: {}, - }, - subjectsExt: map[util.Uint160]*client.SubjectExtended{ - ownerAddr: {}, - }, - } - - apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) - - nm.currentEpoch = 100 - nm.netmaps = map[uint64]*netmap.NetMap{} - var testNetmap netmap.NetMap - testNetmap.SetEpoch(nm.currentEpoch) - testNetmap.SetNodes([]netmap.NodeInfo{{}}) - nm.netmaps[nm.currentEpoch] = &testNetmap - nm.netmaps[nm.currentEpoch-1] = &testNetmap - - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.AccessDenied, - Actions: chain.Actions{ - Names: []string{ - nativeschema.MethodPutContainer, - }, - }, - Resources: chain.Resources{ - Names: []string{ - nativeschema.ResourceFormatRootContainers, - }, - }, - Condition: []chain.Condition{ - { - Kind: chain.KindResource, - Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), - Value: "eggplant", - Op: chain.CondStringEquals, - }, - }, - }, - }, - }) - require.NoError(t, err) - - req := initPutRequest(t, testContainer) - - resp, err := apeSrv.Put(ctxWithPeerInfo(), req) - require.Nil(t, resp) - var errAccessDenied *apistatus.ObjectAccessDenied - require.ErrorAs(t, err, &errAccessDenied) - require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) -} - func testDenyGetContainerByGroupID(t 
*testing.T) { t.Parallel() srv := &srvStub{ @@ -859,7 +678,8 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { testContainer := containertest.Container() owner := testContainer.Owner() - ownerAddr := owner.ScriptHash() + ownerAddr, err := owner.ScriptHash() + require.NoError(t, err) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ ownerAddr: {}, @@ -870,7 +690,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { nm.currentEpoch = 100 nm.netmaps = map[uint64]*netmap.NetMap{} - _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ + _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ Rules: []chain.Rule{ { Status: chain.AccessDenied, @@ -953,7 +773,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(testContainer) + ownerScriptHash := initOwnerIDScriptHash(t, testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -1037,7 +857,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(testContainer) + ownerScriptHash := initOwnerIDScriptHash(t, testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -1273,7 +1093,7 @@ type irStub struct { keys [][]byte } -func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) { +func (s *irStub) InnerRingKeys() ([][]byte, error) { return s.keys, nil } @@ -1281,7 +1101,7 @@ type containerStub struct { c map[cid.ID]*containercore.Container } -func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) { +func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) { if v, ok := s.c[id]; ok { return v, nil } @@ -1293,21 +1113,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) + return s.GetNetMapByEpoch(s.currentEpoch - diff) } -func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { +func (s *netmapStub) Epoch() (uint64, error) { return s.currentEpoch, nil } @@ -1316,7 +1136,7 @@ type frostfsidStub struct { subjectsExt map[util.Uint160]*client.SubjectExtended } -func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) { +func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) { s, ok := f.subjects[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1324,7 +1144,7 @@ func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*cl return s, nil } -func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsidStub) 
GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) { s, ok := f.subjectsExt[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1712,21 +1532,26 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put return req } -func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 { +func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 { var ownerSDK *user.ID owner := testContainer.Owner() ownerSDK = &owner - return ownerSDK.ScriptHash() + sc, err := ownerSDK.ScriptHash() + require.NoError(t, err) + return sc } func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) { var actorUserID user.ID user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey())) - actorScriptHash = actorUserID.ScriptHash() + var err error + actorScriptHash, err = actorUserID.ScriptHash() + require.NoError(t, err) var ownerUserID user.ID user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey())) - ownerScriptHash = ownerUserID.ScriptHash() + ownerScriptHash, err = ownerUserID.ScriptHash() + require.NoError(t, err) require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String()) return } diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index eaa608eba..cadf92e19 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -29,8 +29,8 @@ type Reader interface { // ContainersOf returns a list of container identifiers belonging // to the specified user of FrostFS system. Returns the identifiers // of all FrostFS containers if pointer to owner identifier is nil. - ContainersOf(context.Context, *user.ID) ([]cid.ID, error) - IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error + ContainersOf(*user.ID) ([]cid.ID, error) + IterateContainersOf(*user.ID, func(cid.ID) error) error } // Writer is an interface of container storage updater. 
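As the Reader doc comment above states, a nil owner pointer selects every container in the system. A brief usage sketch against the reverted context-free interface (rdr stands for any Reader implementation, owner for a decoded user.ID):

	// All containers known to the network:
	all, err := rdr.ContainersOf(nil)
	if err != nil {
		return err
	}
	_ = all

	// Stream one user's containers without materializing the whole slice:
	err = rdr.IterateContainersOf(&owner, func(id cid.ID) error {
		fmt.Println(id.EncodeToString())
		return nil
	})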
@@ -133,7 +133,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body return new(container.DeleteResponseBody), nil } -func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { +func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -146,7 +146,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) return nil, fmt.Errorf("invalid container ID: %w", err) } - cnr, err := s.rdr.Get(ctx, id) + cnr, err := s.rdr.Get(id) if err != nil { return nil, err } @@ -173,7 +173,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) return res, nil } -func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { +func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errMissingUserID @@ -186,7 +186,7 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod return nil, fmt.Errorf("invalid user ID: %w", err) } - cnrs, err := s.rdr.ContainersOf(ctx, &id) + cnrs, err := s.rdr.ContainersOf(&id) if err != nil { return nil, err } @@ -243,7 +243,7 @@ func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStrea return nil } - if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil { + if err = s.rdr.IterateContainersOf(&id, processCID); err != nil { return err } diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index 0509d2646..e2c385c6a 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -48,7 +48,7 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) ( resp := new(control.TickEpochResponse) resp.SetBody(new(control.TickEpochResponse_Body)) - epoch, err := s.netmapClient.Epoch(ctx) + epoch, err := s.netmapClient.Epoch() if err != nil { return nil, fmt.Errorf("getting current epoch: %w", err) } @@ -77,7 +77,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) resp := new(control.RemoveNodeResponse) resp.SetBody(new(control.RemoveNodeResponse_Body)) - nm, err := s.netmapClient.NetMap(ctx) + nm, err := s.netmapClient.NetMap() if err != nil { return nil, fmt.Errorf("getting netmap: %w", err) } @@ -138,7 +138,7 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error()) } - cids, err := s.containerClient.ContainersOf(ctx, &owner) + cids, err := s.containerClient.ContainersOf(&owner) if err != nil { return nil, fmt.Errorf("failed to get owner's containers: %w", err) } diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go index 0cfca71c1..c2a4f88a6 100644 --- a/pkg/services/control/ir/server/server.go +++ b/pkg/services/control/ir/server/server.go @@ -35,7 +35,8 @@ func panicOnPrmValue(n string, v any) { // the parameterized private key. 
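The hunk below turns the single required-parameter check back into a one-case switch; a plausible reading (an assumption, the change itself does not say) is that the switch form leaves room for further mandatory-parameter cases, e.g.:

	switch {
	case prm.healthChecker == nil:
		panicOnPrmValue("health checker", prm.healthChecker)
	case netmapClient == nil: // hypothetical additional check
		panicOnPrmValue("netmap client", netmapClient)
	}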
func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server { // verify required parameters - if prm.healthChecker == nil { + switch { + case prm.healthChecker == nil: panicOnPrmValue("health checker", prm.healthChecker) } diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index 0c4236d0e..6982d780d 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -1,8 +1,6 @@ package control import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" ) @@ -32,7 +30,6 @@ const ( rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides" rpcDetachShards = "DetachShards" rpcStartShardRebuild = "StartShardRebuild" - rpcListShardsForObject = "ListShardsForObject" ) // HealthCheck executes ControlService.HealthCheck RPC. @@ -76,7 +73,6 @@ func SetNetmapStatus( // GetNetmapStatus executes ControlService.GetNetmapStatus RPC. func GetNetmapStatus( - _ context.Context, cli *client.Client, req *GetNetmapStatusRequest, opts ...client.CallOption, @@ -365,22 +361,3 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts . return wResp.message, nil } - -// ListShardsForObject executes ControlService.ListShardsForObject RPC. -func ListShardsForObject( - cli *client.Client, - req *ListShardsForObjectRequest, - opts ...client.CallOption, -) (*ListShardsForObjectResponse, error) { - wResp := newResponseWrapper[ListShardsForObjectResponse]() - - wReq := &requestWrapper{ - m: req, - } - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index f3ba9015e..da5401515 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -157,7 +157,7 @@ func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *obj return false, nil } - nodes, err := s.getContainerNodes(ctx, cid) + nodes, err := s.getContainerNodes(cid) if err != nil { return false, err } @@ -182,7 +182,7 @@ func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *obj } func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) { - nodes, err := s.getContainerNodes(ctx, contID) + nodes, err := s.getContainerNodes(contID) if err != nil { return false, "", err } @@ -220,7 +220,7 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest TreeId: treeID, Operation: &tree.LogMove{ ParentId: op.Parent, - Meta: op.Bytes(), + Meta: op.Meta.Bytes(), ChildId: op.Child, }, }, @@ -240,13 +240,13 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest } } -func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) { - nm, err := s.netMapSrc.GetNetMap(ctx, 0) +func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { + nm, err := s.netMapSrc.GetNetMap(0) if err != nil { return nil, err } - c, err := s.cnrSrc.Get(ctx, contID) + c, err := s.cnrSrc.Get(contID) if err != nil { return nil, err } diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go index a8ef7809e..d9fefc38e 100644 --- a/pkg/services/control/server/gc.go +++ 
b/pkg/services/control/server/gc.go @@ -42,7 +42,8 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques prm.WithForceRemoval() prm.WithAddress(addrList[i]) - if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil { + _, err := s.s.Delete(ctx, prm) + if err != nil && firstErr == nil { firstErr = err } } diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go index 5e0496910..1c038253a 100644 --- a/pkg/services/control/server/get_netmap_status.go +++ b/pkg/services/control/server/get_netmap_status.go @@ -10,12 +10,12 @@ import ( ) // GetNetmapStatus gets node status in FrostFS network. -func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) { +func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } - st, epoch, err := s.nodeState.GetNetmapStatus(ctx) + st, epoch, err := s.nodeState.GetNetmapStatus() if err != nil { return nil, err } diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go deleted file mode 100644 index 39565ed50..000000000 --- a/pkg/services/control/server/list_shards_for_object.go +++ /dev/null @@ -1,65 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - var obj oid.ID - err = obj.DecodeString(req.GetBody().GetObjectId()) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - var cnr cid.ID - err = cnr.DecodeString(req.GetBody().GetContainerId()) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - resp := new(control.ListShardsForObjectResponse) - body := new(control.ListShardsForObjectResponse_Body) - resp.SetBody(body) - - var objAddr oid.Address - objAddr.SetContainer(cnr) - objAddr.SetObject(obj) - info, err := s.s.ListShardsForObject(ctx, objAddr) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - if len(info) == 0 { - return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject) - } - - body.SetShard_ID(shardInfoToProto(info)) - - // Sign the response - if err := ctrlmessage.Sign(s.key, resp); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func shardInfoToProto(infos []shard.Info) [][]byte { - shardInfos := make([][]byte, 0, len(infos)) - for _, info := range infos { - shardInfos = append(shardInfos, *info.ID) - } - - return shardInfos -} diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go index 
59d701bc6..94aa1ff5b 100644 --- a/pkg/services/control/server/server.go +++ b/pkg/services/control/server/server.go @@ -52,7 +52,7 @@ type NodeState interface { // but starts local maintenance regardless of the network settings. ForceMaintenance(ctx context.Context) error - GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) + GetNetmapStatus() (control.NetmapStatus, uint64, error) } // LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 4c539acfc..97ecf9a8c 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -89,9 +89,6 @@ service ControlService { // StartShardRebuild starts shard rebuild process. rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse); - - // ListShardsForObject returns shard info where object is stored. - rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse); } // Health check request. @@ -732,23 +729,3 @@ message StartShardRebuildResponse { Signature signature = 2; } - -message ListShardsForObjectRequest { - message Body { - string object_id = 1; - string container_id = 2; - } - - Body body = 1; - Signature signature = 2; -} - -message ListShardsForObjectResponse { - message Body { - // List of the node's shards storing object. - repeated bytes shard_ID = 1; - } - - Body body = 1; - Signature signature = 2; -} diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index 44849d591..0b4e3cf32 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -17303,727 +17303,3 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { in.Consumed() } } - -type ListShardsForObjectRequest_Body struct { - ObjectId string `json:"objectId"` - ContainerId string `json:"containerId"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil) - _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil) - _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsForObjectRequest_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.StringSize(1, x.ObjectId) - size += proto.StringSize(2, x.ContainerId) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if len(x.ObjectId) != 0 { - mm.AppendString(1, x.ObjectId) - } - if len(x.ContainerId) != 0 { - mm.AppendString(2, x.ContainerId) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
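With the RPC dropped from service.proto and from the generated ServiceDesc, a client built against the older API that still calls ListShardsForObject should get codes.Unimplemented back from an updated node (grpc-go answers unknown methods with that code). A minimal client-side check for that case — an assumed usage pattern, not something this patch adds:

	package sketch

	import (
		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	// isUnimplemented reports whether err means the peer does not serve the
	// called RPC, e.g. ListShardsForObject after this change.
	func isUnimplemented(err error) bool {
		return status.Code(err) == codes.Unimplemented
	}
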
-func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body") - } - switch fc.FieldNum { - case 1: // ObjectId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ObjectId") - } - x.ObjectId = data - case 2: // ContainerId - data, ok := fc.String() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "ContainerId") - } - x.ContainerId = data - } - } - return nil -} -func (x *ListShardsForObjectRequest_Body) GetObjectId() string { - if x != nil { - return x.ObjectId - } - return "" -} -func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) { - x.ObjectId = v -} -func (x *ListShardsForObjectRequest_Body) GetContainerId() string { - if x != nil { - return x.ContainerId - } - return "" -} -func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) { - x.ContainerId = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"objectId\":" - out.RawString(prefix) - out.String(x.ObjectId) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"containerId\":" - out.RawString(prefix) - out.String(x.ContainerId) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "objectId": - { - var f string - f = in.String() - x.ObjectId = f - } - case "containerId": - { - var f string - f = in.String() - x.ContainerId = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsForObjectRequest struct { - Body *ListShardsForObjectRequest_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil) - _ json.Marshaler = (*ListShardsForObjectRequest)(nil) - _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsForObjectRequest) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
-// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ListShardsForObjectRequest) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListShardsForObjectRequest_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) { - x.Body = v -} -func (x *ListShardsForObjectRequest) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListShardsForObjectRequest) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListShardsForObjectRequest_Body - f = new(ListShardsForObjectRequest_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsForObjectResponse_Body struct { - Shard_ID [][]byte `json:"shardID"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil) - _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil) - _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsForObjectResponse_Body) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.RepeatedBytesSize(1, x.Shard_ID) - return size -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. -func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - for j := range x.Shard_ID { - mm.AppendBytes(1, x.Shard_ID[j]) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body") - } - switch fc.FieldNum { - case 1: // Shard_ID - data, ok := fc.Bytes() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") - } - x.Shard_ID = append(x.Shard_ID, data) - } - } - return nil -} -func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte { - if x != nil { - return x.Shard_ID - } - return nil -} -func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) { - x.Shard_ID = v -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"shardID\":" - out.RawString(prefix) - out.RawByte('[') - for i := range x.Shard_ID { - if i != 0 { - out.RawByte(',') - } - if x.Shard_ID[i] != nil { - out.Base64Bytes(x.Shard_ID[i]) - } else { - out.String("") - } - } - out.RawByte(']') - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "shardID": - { - var f []byte - var list [][]byte - in.Delim('[') - for !in.IsDelim(']') { - { - tmp := in.Bytes() - if len(tmp) == 0 { - tmp = nil - } - f = tmp - } - list = append(list, f) - in.WantComma() - } - x.Shard_ID = list - in.Delim(']') - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} - -type ListShardsForObjectResponse struct { - Body *ListShardsForObjectResponse_Body `json:"body"` - Signature *Signature `json:"signature"` -} - -var ( - _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil) - _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil) - _ json.Marshaler = (*ListShardsForObjectResponse)(nil) - _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil) -) - -// StableSize returns the size of x in protobuf format. -// -// Structures with the same field values have the same binary size. -func (x *ListShardsForObjectResponse) StableSize() (size int) { - if x == nil { - return 0 - } - size += proto.NestedStructureSize(1, x.Body) - size += proto.NestedStructureSize(2, x.Signature) - return size -} - -// ReadSignedData fills buf with signed data of x. -// If buffer length is less than x.SignedDataSize(), new buffer is allocated. -// -// Returns any error encountered which did not allow writing the data completely. -// Otherwise, returns the buffer in which the data is written. -// -// Structures with the same field values have the same signed data. -func (x *ListShardsForObjectResponse) SignedDataSize() int { - return x.GetBody().StableSize() -} - -// SignedDataSize returns size of the request signed data in bytes. -// -// Structures with the same field values have the same signed data size. -func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) { - return x.GetBody().MarshalProtobuf(buf), nil -} - -// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
-func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte { - m := pool.MarshalerPool.Get() - defer pool.MarshalerPool.Put(m) - x.EmitProtobuf(m.MessageMarshaler()) - dst = m.Marshal(dst) - return dst -} - -func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { - if x == nil { - return - } - if x.Body != nil { - x.Body.EmitProtobuf(mm.AppendMessage(1)) - } - if x.Signature != nil { - x.Signature.EmitProtobuf(mm.AppendMessage(2)) - } -} - -// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. -func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) { - var fc easyproto.FieldContext - for len(src) > 0 { - src, err = fc.NextField(src) - if err != nil { - return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse") - } - switch fc.FieldNum { - case 1: // Body - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Body") - } - x.Body = new(ListShardsForObjectResponse_Body) - if err := x.Body.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - case 2: // Signature - data, ok := fc.MessageData() - if !ok { - return fmt.Errorf("cannot unmarshal field %s", "Signature") - } - x.Signature = new(Signature) - if err := x.Signature.UnmarshalProtobuf(data); err != nil { - return fmt.Errorf("unmarshal: %w", err) - } - } - } - return nil -} -func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body { - if x != nil { - return x.Body - } - return nil -} -func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) { - x.Body = v -} -func (x *ListShardsForObjectResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} -func (x *ListShardsForObjectResponse) SetSignature(v *Signature) { - x.Signature = v -} - -// MarshalJSON implements the json.Marshaler interface. -func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - x.MarshalEasyJSON(&w) - return w.Buffer.BuildBytes(), w.Error -} -func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) { - if x == nil { - out.RawString("null") - return - } - first := true - out.RawByte('{') - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"body\":" - out.RawString(prefix) - x.Body.MarshalEasyJSON(out) - } - { - if !first { - out.RawByte(',') - } else { - first = false - } - const prefix string = "\"signature\":" - out.RawString(prefix) - x.Signature.MarshalEasyJSON(out) - } - out.RawByte('}') -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - x.UnmarshalEasyJSON(&r) - return r.Error() -} -func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeFieldName(false) - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "body": - { - var f *ListShardsForObjectResponse_Body - f = new(ListShardsForObjectResponse_Body) - f.UnmarshalEasyJSON(in) - x.Body = f - } - case "signature": - { - var f *Signature - f = new(Signature) - f.UnmarshalEasyJSON(in) - x.Signature = f - } - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index 045662ccf..987e08c59 100644 --- a/pkg/services/control/service_grpc.pb.go +++ b/pkg/services/control/service_grpc.pb.go @@ -41,7 +41,6 @@ const ( ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache" ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards" ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild" - ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject" ) // ControlServiceClient is the client API for ControlService service. @@ -96,8 +95,6 @@ type ControlServiceClient interface { DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) - // ListShardsForObject returns shard info where object is stored. - ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) } type controlServiceClient struct { @@ -306,15 +303,6 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS return out, nil } -func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) { - out := new(ListShardsForObjectResponse) - err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // ControlServiceServer is the server API for ControlService service. // All implementations should embed UnimplementedControlServiceServer // for forward compatibility @@ -367,8 +355,6 @@ type ControlServiceServer interface { DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) - // ListShardsForObject returns shard info where object is stored. - ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) } // UnimplementedControlServiceServer should be embedded to have forward compatible implementations. 
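That embedding contract is what makes this kind of method removal tolerable on the server side: implementations that embed the stub keep compiling whichever RPCs come or go. A minimal illustration, assuming the generated package is imported from its path in this repository:

	package sketch

	import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"

	// srv implements only the RPCs it needs; any ControlService method it
	// does not define falls through to the embedded stub, which returns
	// codes.Unimplemented instead of breaking the build.
	type srv struct {
		control.UnimplementedControlServiceServer
	}
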
@@ -441,9 +427,6 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented") } -func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented") -} // UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ControlServiceServer will @@ -852,24 +835,6 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } -func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListShardsForObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).ListShardsForObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_ListShardsForObject_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - // ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -965,10 +930,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{ MethodName: "StartShardRebuild", Handler: _ControlService_StartShardRebuild_Handler, }, - { - MethodName: "ListShardsForObject", - Handler: _ControlService_ListShardsForObject_Handler, - }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/services/control/service.proto", diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 1b92fdaad..5223047df 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" @@ -43,16 +42,14 @@ type NetworkInfo interface { // Dump must return recent network information in FrostFS API v2 NetworkInfo structure. // // If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset. 
-	Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+	Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
 }
 
 func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
-	// this should never happen, otherwise it's a programmer's bug
-	msg := "BUG: can't create netmap execution service"
-	assert.False(s == nil, msg, "node state is nil")
-	assert.False(netInfo == nil, msg, "network info is nil")
-	assert.False(respSvc == nil, msg, "response service is nil")
-	assert.True(version.IsValid(v), msg, "invalid version")
+	if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
+		// this should never happen, otherwise it's a programmer's bug
+		panic("can't create netmap execution service")
+	}
 
 	res := &executorSvc{
 		state: s,
@@ -85,7 +82,7 @@ func (s *executorSvc) LocalNodeInfo(
 }
 
 func (s *executorSvc) NetworkInfo(
-	ctx context.Context,
+	_ context.Context,
 	req *netmap.NetworkInfoRequest,
 ) (*netmap.NetworkInfoResponse, error) {
 	verV2 := req.GetMetaHeader().GetVersion()
@@ -98,7 +95,7 @@ func (s *executorSvc) NetworkInfo(
 		return nil, fmt.Errorf("can't read version: %w", err)
 	}
 
-	ni, err := s.netInfo.Dump(ctx, ver)
+	ni, err := s.netInfo.Dump(ver)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
new file mode 100644
index 000000000..94e015abe
--- /dev/null
+++ b/pkg/services/object/acl/eacl/v2/eacl_test.go
@@ -0,0 +1,166 @@
+package v2
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"errors"
+	"testing"
+
+	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+	eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/stretchr/testify/require"
+)
+
+type testLocalStorage struct {
+	t *testing.T
+
+	expAddr oid.Address
+
+	obj *objectSDK.Object
+
+	err error
+}
+
+func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+	require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
+	require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
+
+	return s.obj, s.err
+}
+
+func testXHeaders(strs ...string) []session.XHeader {
+	res := make([]session.XHeader, len(strs)/2)
+
+	for i := 0; i < len(strs); i += 2 {
+		res[i/2].SetKey(strs[i])
+		res[i/2].SetValue(strs[i+1])
+	}
+
+	return res
+}
+
+func TestHeadRequest(t *testing.T) {
+	req := new(objectV2.HeadRequest)
+
+	meta := new(session.RequestMetaHeader)
+	req.SetMetaHeader(meta)
+
+	body := new(objectV2.HeadRequestBody)
+	req.SetBody(body)
+
+	addr := oidtest.Address()
+
+	var addrV2 refs.Address
+	addr.WriteToV2(&addrV2)
+
+	body.SetAddress(&addrV2)
+
+	xKey := "x-key"
+	xVal := "x-val"
+	xHdrs := testXHeaders(
+		xKey, xVal,
+	)
+
+	meta.SetXHeaders(xHdrs)
+
+	obj := objectSDK.New()
+
+	attrKey := "attr_key"
+	attrVal := "attr_val"
+	var attr objectSDK.Attribute
+	attr.SetKey(attrKey)
+	attr.SetValue(attrVal)
+	obj.SetAttributes(attr)
+
+	table := new(eaclSDK.Table)
+
+	priv, err := keys.NewPrivateKey()
+	require.NoError(t, err)
+	senderKey := priv.PublicKey()
+
+	r := eaclSDK.NewRecord()
+	r.SetOperation(eaclSDK.OperationHead)
+
r.SetAction(eaclSDK.ActionDeny) + r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal) + r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal) + eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) + + table.AddRecord(r) + + lStorage := &testLocalStorage{ + t: t, + expAddr: addr, + obj: obj, + } + + id := addr.Object() + + newSource := func(t *testing.T) eaclSDK.TypedHeaderSource { + hdrSrc, err := NewMessageHeaderSource( + lStorage, + NewRequestXHeaderSource(req), + addr.Container(), + WithOID(&id)) + require.NoError(t, err) + return hdrSrc + } + + cnr := addr.Container() + + unit := new(eaclSDK.ValidationUnit). + WithContainerID(&cnr). + WithOperation(eaclSDK.OperationHead). + WithSenderKey(senderKey.Bytes()). + WithEACLTable(table) + + validator := eaclSDK.NewValidator() + + checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t))) + + meta.SetXHeaders(nil) + + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) + + meta.SetXHeaders(xHdrs) + + obj.SetAttributes() + + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) + + lStorage.err = errors.New("any error") + + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) + + r.SetAction(eaclSDK.ActionAllow) + + rID := eaclSDK.NewRecord() + rID.SetOperation(eaclSDK.OperationHead) + rID.SetAction(eaclSDK.ActionDeny) + rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object()) + eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) + + table = eaclSDK.NewTable() + table.AddRecord(r) + table.AddRecord(rID) + + unit.WithEACLTable(table) + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) +} + +func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { + actual, fromRule := v.CalculateAction(u) + require.True(t, fromRule) + require.Equal(t, expected, actual) +} + +func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { + actual, fromRule := v.CalculateAction(u) + require.False(t, fromRule) + require.Equal(t, eaclSDK.ActionAllow, actual) +} diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go new file mode 100644 index 000000000..ecb793df8 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/headers.go @@ -0,0 +1,246 @@ +package v2 + +import ( + "context" + "errors" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" +) + +type Option func(*cfg) + +type cfg struct { + storage ObjectStorage + + msg XHeaderSource + + cnr cid.ID + obj *oid.ID +} + +type ObjectStorage interface { + Head(context.Context, oid.Address) (*objectSDK.Object, error) +} + +type Request interface { + GetMetaHeader() *session.RequestMetaHeader +} + +type Response interface { + GetMetaHeader() *session.ResponseMetaHeader +} + +type headerSource struct { + requestHeaders []eaclSDK.Header + objectHeaders []eaclSDK.Header + + 
incompleteObjectHeaders bool +} + +func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) { + cfg := &cfg{ + storage: os, + cnr: cnrID, + msg: xhs, + } + + for i := range opts { + opts[i](cfg) + } + + if cfg.msg == nil { + return nil, errors.New("message is not provided") + } + + var res headerSource + + err := cfg.readObjectHeaders(&res) + if err != nil { + return nil, err + } + + res.requestHeaders = cfg.msg.GetXHeaders() + + return res, nil +} + +func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) { + switch typ { + default: + return nil, true + case eaclSDK.HeaderFromRequest: + return h.requestHeaders, true + case eaclSDK.HeaderFromObject: + return h.objectHeaders, !h.incompleteObjectHeaders + } +} + +type xHeader session.XHeader + +func (x xHeader) Key() string { + return (*session.XHeader)(&x).GetKey() +} + +func (x xHeader) Value() string { + return (*session.XHeader)(&x).GetValue() +} + +var errMissingOID = errors.New("object ID is missing") + +func (h *cfg) readObjectHeaders(dst *headerSource) error { + switch m := h.msg.(type) { + default: + panic(fmt.Sprintf("unexpected message type %T", h.msg)) + case requestXHeaderSource: + return h.readObjectHeadersFromRequestXHeaderSource(m, dst) + case responseXHeaderSource: + return h.readObjectHeadersResponseXHeaderSource(m, dst) + } +} + +func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error { + switch req := m.req.(type) { + case + *objectV2.GetRequest, + *objectV2.HeadRequest: + if h.obj == nil { + return errMissingOID + } + + objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) + + dst.objectHeaders = objHeaders + dst.incompleteObjectHeaders = !completed + case + *objectV2.GetRangeRequest, + *objectV2.GetRangeHashRequest, + *objectV2.DeleteRequest: + if h.obj == nil { + return errMissingOID + } + + dst.objectHeaders = addressHeaders(h.cnr, h.obj) + case *objectV2.PutRequest: + if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { + oV2 := new(objectV2.Object) + oV2.SetObjectID(v.GetObjectID()) + oV2.SetHeader(v.GetHeader()) + + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) + } + case *objectV2.PutSingleRequest: + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj) + case *objectV2.SearchRequest: + cnrV2 := req.GetBody().GetContainerID() + var cnr cid.ID + + if cnrV2 != nil { + if err := cnr.ReadFromV2(*cnrV2); err != nil { + return fmt.Errorf("can't parse container ID: %w", err) + } + } + + dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)} + } + return nil +} + +func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error { + switch resp := m.resp.(type) { + default: + objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) + + dst.objectHeaders = objectHeaders + dst.incompleteObjectHeaders = !completed + case *objectV2.GetResponse: + if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { + oV2 := new(objectV2.Object) + oV2.SetObjectID(v.GetObjectID()) + oV2.SetHeader(v.GetHeader()) + + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) + } + case *objectV2.HeadResponse: + oV2 := new(objectV2.Object) + + var hdr *objectV2.Header + + switch v := resp.GetBody().GetHeaderPart().(type) { + case *objectV2.ShortHeader: + hdr = new(objectV2.Header) + + var idV2 
refsV2.ContainerID + h.cnr.WriteToV2(&idV2) + + hdr.SetContainerID(&idV2) + hdr.SetVersion(v.GetVersion()) + hdr.SetCreationEpoch(v.GetCreationEpoch()) + hdr.SetOwnerID(v.GetOwnerID()) + hdr.SetObjectType(v.GetObjectType()) + hdr.SetPayloadLength(v.GetPayloadLength()) + case *objectV2.HeaderWithSignature: + hdr = v.GetHeader() + } + + oV2.SetHeader(hdr) + + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) + } + return nil +} + +func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) { + if idObj != nil { + var addr oid.Address + addr.SetContainer(cnr) + addr.SetObject(*idObj) + + obj, err := h.storage.Head(context.TODO(), addr) + if err == nil { + return headersFromObject(obj, cnr, idObj), true + } + } + + return addressHeaders(cnr, idObj), false +} + +func cidHeader(idCnr cid.ID) sysObjHdr { + return sysObjHdr{ + k: acl.FilterObjectContainerID, + v: idCnr.EncodeToString(), + } +} + +func oidHeader(obj oid.ID) sysObjHdr { + return sysObjHdr{ + k: acl.FilterObjectID, + v: obj.EncodeToString(), + } +} + +func ownerIDHeader(ownerID user.ID) sysObjHdr { + return sysObjHdr{ + k: acl.FilterObjectOwnerID, + v: ownerID.EncodeToString(), + } +} + +func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header { + hh := make([]eaclSDK.Header, 0, 2) + hh = append(hh, cidHeader(cnr)) + + if oid != nil { + hh = append(hh, oidHeader(*oid)) + } + + return hh +} diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go new file mode 100644 index 000000000..92570a3c5 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/object.go @@ -0,0 +1,92 @@ +package v2 + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +type sysObjHdr struct { + k, v string +} + +func (s sysObjHdr) Key() string { + return s.k +} + +func (s sysObjHdr) Value() string { + return s.v +} + +func u64Value(v uint64) string { + return strconv.FormatUint(v, 10) +} + +func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header { + var count int + for obj := obj; obj != nil; obj = obj.Parent() { + count += 9 + len(obj.Attributes()) + } + + res := make([]eaclSDK.Header, 0, count) + for ; obj != nil; obj = obj.Parent() { + res = append(res, + cidHeader(cnr), + // creation epoch + sysObjHdr{ + k: acl.FilterObjectCreationEpoch, + v: u64Value(obj.CreationEpoch()), + }, + // payload size + sysObjHdr{ + k: acl.FilterObjectPayloadLength, + v: u64Value(obj.PayloadSize()), + }, + // object version + sysObjHdr{ + k: acl.FilterObjectVersion, + v: obj.Version().String(), + }, + // object type + sysObjHdr{ + k: acl.FilterObjectType, + v: obj.Type().String(), + }, + ) + + if oid != nil { + res = append(res, oidHeader(*oid)) + } + + if idOwner := obj.OwnerID(); !idOwner.IsEmpty() { + res = append(res, ownerIDHeader(idOwner)) + } + + cs, ok := obj.PayloadChecksum() + if ok { + res = append(res, sysObjHdr{ + k: acl.FilterObjectPayloadHash, + v: cs.String(), + }) + } + + cs, ok = obj.PayloadHomomorphicHash() + if ok { + res = append(res, sysObjHdr{ + k: acl.FilterObjectHomomorphicHash, + v: cs.String(), + }) + } + + attrs := obj.Attributes() + for i := range attrs { + res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header 
interface + } + } + + return res +} diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go new file mode 100644 index 000000000..d91a21c75 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/opts.go @@ -0,0 +1,11 @@ +package v2 + +import ( + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func WithOID(v *oid.ID) Option { + return func(c *cfg) { + c.obj = v + } +} diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go new file mode 100644 index 000000000..ce380c117 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/xheader.go @@ -0,0 +1,69 @@ +package v2 + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" +) + +type XHeaderSource interface { + GetXHeaders() []eaclSDK.Header +} + +type requestXHeaderSource struct { + req Request +} + +func NewRequestXHeaderSource(req Request) XHeaderSource { + return requestXHeaderSource{req: req} +} + +type responseXHeaderSource struct { + resp Response + + req Request +} + +func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource { + return responseXHeaderSource{resp: resp, req: req} +} + +func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header { + ln := 0 + + for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { + ln += len(meta.GetXHeaders()) + } + + res := make([]eaclSDK.Header, 0, ln) + for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { + x := meta.GetXHeaders() + for i := range x { + res = append(res, (xHeader)(x[i])) + } + } + + return res +} + +func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header { + ln := 0 + xHdrs := make([][]session.XHeader, 0) + + for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { + x := meta.GetXHeaders() + + ln += len(x) + + xHdrs = append(xHdrs, x) + } + + res := make([]eaclSDK.Header, 0, ln) + + for i := range xHdrs { + for j := range xHdrs[i] { + res = append(res, xHeader(xHdrs[i][j])) + } + } + + return res +} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go new file mode 100644 index 000000000..cd2de174a --- /dev/null +++ b/pkg/services/object/acl/v2/errors.go @@ -0,0 +1,20 @@ +package v2 + +import ( + "fmt" +) + +const invalidRequestMessage = "malformed request" + +func malformedRequestError(reason string) error { + return fmt.Errorf("%s: %s", invalidRequestMessage, reason) +} + +var ( + errEmptyBody = malformedRequestError("empty body") + errEmptyVerificationHeader = malformedRequestError("empty verification header") + errEmptyBodySig = malformedRequestError("empty at body signature") + errInvalidSessionSig = malformedRequestError("invalid session token signature") + errInvalidSessionOwner = malformedRequestError("invalid session token owner") + errInvalidVerb = malformedRequestError("session token verb is invalid") +) diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go new file mode 100644 index 000000000..15fcce884 --- /dev/null +++ b/pkg/services/object/acl/v2/opts.go @@ -0,0 +1,12 @@ +package v2 + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" +) + +// WithLogger returns option to set logger. 
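Both opts.go files in this patch, the WithOID option above and the WithLogger option that follows, are instances of the same functional-option shape: an Option is a closure over the package-private cfg, applied in a constructor loop. Restated as a self-contained sketch with invented names:

	package sketch

	type cfg struct {
		level string
	}

	// Option mutates cfg during construction.
	type Option func(*cfg)

	// WithLevel is an invented example option.
	func WithLevel(lvl string) Option {
		return func(c *cfg) { c.level = lvl }
	}

	// New applies options over defaults, just as the constructors in this
	// patch do with their `for i := range opts { opts[i](cfg) }` loops.
	func New(opts ...Option) *cfg {
		c := &cfg{level: "info"}
		for i := range opts {
			opts[i](c)
		}
		return c
	}
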
+func WithLogger(v *logger.Logger) Option {
+	return func(c *cfg) {
+		c.log = v
+	}
+}
diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go
new file mode 100644
index 000000000..8bd34ccb3
--- /dev/null
+++ b/pkg/services/object/acl/v2/request.go
@@ -0,0 +1,152 @@
+package v2
+
+import (
+	"crypto/ecdsa"
+	"fmt"
+
+	sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// RequestInfo groups parsed version-independent (from SDK library)
+// request information and raw API request.
+type RequestInfo struct {
+	basicACL    acl.Basic
+	requestRole acl.Role
+	operation   acl.Op  // put, get, head, etc.
+	cnrOwner    user.ID // container owner
+
+	// cnrNamespace defines the namespace the container belongs to.
+	cnrNamespace string
+
+	idCnr cid.ID
+
+	// optional for some requests,
+	// e.g. Put, Search
+	obj *oid.ID
+
+	senderKey []byte
+
+	bearer *bearer.Token // bearer token of request
+
+	srcRequest any
+}
+
+func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
+	r.basicACL = basicACL
+}
+
+func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
+	r.requestRole = requestRole
+}
+
+func (r *RequestInfo) SetSenderKey(senderKey []byte) {
+	r.senderKey = senderKey
+}
+
+// Request returns the raw API request.
+func (r RequestInfo) Request() any {
+	return r.srcRequest
+}
+
+// ContainerOwner returns the owner of the container.
+func (r RequestInfo) ContainerOwner() user.ID {
+	return r.cnrOwner
+}
+
+func (r RequestInfo) ContainerNamespace() string {
+	return r.cnrNamespace
+}
+
+// ObjectID returns the object ID.
+func (r RequestInfo) ObjectID() *oid.ID {
+	return r.obj
+}
+
+// ContainerID returns the container ID.
+func (r RequestInfo) ContainerID() cid.ID {
+	return r.idCnr
+}
+
+// CleanBearer forces cleaning bearer token information.
+func (r *RequestInfo) CleanBearer() {
+	r.bearer = nil
+}
+
+// Bearer returns bearer token of the request.
+func (r RequestInfo) Bearer() *bearer.Token {
+	return r.bearer
+}
+
+// BasicACL returns basic ACL of the container.
+func (r RequestInfo) BasicACL() acl.Basic {
+	return r.basicACL
+}
+
+// SenderKey returns public key of the request's sender.
+func (r RequestInfo) SenderKey() []byte {
+	return r.senderKey
+}
+
+// Operation returns request's operation.
+func (r RequestInfo) Operation() acl.Op {
+	return r.operation
+}
+
+// RequestRole returns request sender's role.
+func (r RequestInfo) RequestRole() acl.Role {
+	return r.requestRole
+}
+
+// MetaWithToken groups session and bearer tokens,
+// verification header and raw API request.
+type MetaWithToken struct {
+	vheader *sessionV2.RequestVerificationHeader
+	token   *sessionSDK.Object
+	bearer  *bearer.Token
+	src     any
+}
+
+// RequestOwner returns ownerID and its public key
+// according to internal meta information.
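+// The owner is resolved in order of precedence: the signer of an
+// impersonation bearer token if one is attached, then the owner of the
+// session token if present, and otherwise the key of the original
+// request body signature.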
+func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { + if r.vheader == nil { + return nil, nil, errEmptyVerificationHeader + } + + if r.bearer != nil && r.bearer.Impersonate() { + return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) + } + + // if session token is presented, use it as truth source + if r.token != nil { + // verify signature of session token + return ownerFromToken(r.token) + } + + // otherwise get original body signature + bodySignature := originalBodySignature(r.vheader) + if bodySignature == nil { + return nil, nil, errEmptyBodySig + } + + return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) +} + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid signature key: %w", err) + } + + var idSender user.ID + user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) + + return &idSender, key, nil +} diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/acl/v2/request_test.go similarity index 83% rename from pkg/services/object/ape/metadata_test.go rename to pkg/services/object/acl/v2/request_test.go index fd919008f..618af3469 100644 --- a/pkg/services/object/ape/metadata_test.go +++ b/pkg/services/object/acl/v2/request_test.go @@ -1,4 +1,4 @@ -package ape +package v2 import ( "testing" @@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) { vh.SetBodySignature(&userSignature) t.Run("empty verification header", func(t *testing.T) { - req := Metadata{} + req := MetaWithToken{} checkOwner(t, req, nil, errEmptyVerificationHeader) }) t.Run("empty verification header signature", func(t *testing.T) { - req := Metadata{ - VerificationHeader: new(sessionV2.RequestVerificationHeader), + req := MetaWithToken{ + vheader: new(sessionV2.RequestVerificationHeader), } checkOwner(t, req, nil, errEmptyBodySig) }) t.Run("no tokens", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, + req := MetaWithToken{ + vheader: vh, } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, false), + req := MetaWithToken{ + vheader: vh, + bearer: newBearer(t, containerOwner, userID, false), } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, true), + req := MetaWithToken{ + vheader: vh, + bearer: newBearer(t, containerOwner, userID, true), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) { pk, err := keys.NewPrivateKey() require.NoError(t, err) - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, true), - SessionToken: newSession(t, pk), + req := MetaWithToken{ + vheader: vh, + bearer: newBearer(t, containerOwner, userID, true), + token: newSession(t, pk), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) t.Run("with session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - SessionToken: newSession(t, containerOwner), + req := MetaWithToken{ + vheader: vh, + token: newSession(t, containerOwner), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) { var tok sessionSDK.Object require.NoError(t, 
tok.ReadFromV2(tokV2))
 
-		req := Metadata{
-			VerificationHeader: vh,
-			SessionToken:       &tok,
+		req := MetaWithToken{
+			vheader: vh,
+			token:   &tok,
 		}
 		checkOwner(t, req, nil, errInvalidSessionOwner)
 	})
@@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
 	return &tok
 }
 
-func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
+func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
 	_, actual, err := req.RequestOwner()
 	if expectedErr != nil {
 		require.ErrorIs(t, err, expectedErr)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
new file mode 100644
index 000000000..db0f13ee7
--- /dev/null
+++ b/pkg/services/object/acl/v2/service.go
@@ -0,0 +1,779 @@
+package v2
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	"go.uber.org/zap"
+)
+
+// Service checks basic ACL rules.
+type Service struct {
+	*cfg
+
+	c objectCore.SenderClassifier
+}
+
+type putStreamBasicChecker struct {
+	source *Service
+	next   object.PutObjectStream
+}
+
+type patchStreamBasicChecker struct {
+	source       *Service
+	next         object.PatchObjectStream
+	nonFirstSend bool
+}
+
+// Option represents Service constructor option.
+type Option func(*cfg)
+
+type cfg struct {
+	log *logger.Logger
+
+	containers container.Source
+
+	irFetcher InnerRingFetcher
+
+	nm netmap.Source
+
+	next object.ServiceServer
+}
+
+// New is a constructor for object ACL checking service.
+func New(next object.ServiceServer,
+	nm netmap.Source,
+	irf InnerRingFetcher,
+	cs container.Source,
+	opts ...Option,
+) Service {
+	cfg := &cfg{
+		log:        logger.NewLoggerWrapper(zap.L()),
+		next:       next,
+		nm:         nm,
+		irFetcher:  irf,
+		containers: cs,
+	}
+
+	for i := range opts {
+		opts[i](cfg)
+	}
+
+	return Service{
+		cfg: cfg,
+		c:   objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
+	}
+}
+
+// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
+// This allows retrieving already-calculated, immutable request-specific values in the next handler invocation.
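Stripped of the service types, the decoration performed by the three wrapper types below is plain context.WithValue layering; an invented, self-contained sketch of the technique, not the patch's code:

	package sketch

	import "context"

	type ctxKey struct{}

	// stream stands in for object.GetObjectStream and friends.
	type stream interface{ Context() context.Context }

	// withInfo overrides Context() so every later call on the wrapped
	// stream sees the precomputed per-request value.
	type withInfo struct {
		stream
		info string // stands in for *object.RequestContext
	}

	func (w withInfo) Context() context.Context {
		return context.WithValue(w.stream.Context(), ctxKey{}, w.info)
	}
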
+type wrappedGetObjectStream struct {
+	object.GetObjectStream
+
+	requestInfo RequestInfo
+}
+
+func (w *wrappedGetObjectStream) Context() context.Context {
+	return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
+		Namespace:      w.requestInfo.ContainerNamespace(),
+		ContainerOwner: w.requestInfo.ContainerOwner(),
+		SenderKey:      w.requestInfo.SenderKey(),
+		Role:           w.requestInfo.RequestRole(),
+		BearerToken:    w.requestInfo.Bearer(),
+	})
+}
+
+func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
+	return &wrappedGetObjectStream{
+		GetObjectStream: getObjectStream,
+		requestInfo:     reqInfo,
+	}
+}
+
+// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
+// This allows retrieving already-calculated, immutable request-specific values in the next handler invocation.
+type wrappedRangeStream struct {
+	object.GetObjectRangeStream
+
+	requestInfo RequestInfo
+}
+
+func (w *wrappedRangeStream) Context() context.Context {
+	return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
+		Namespace:      w.requestInfo.ContainerNamespace(),
+		ContainerOwner: w.requestInfo.ContainerOwner(),
+		SenderKey:      w.requestInfo.SenderKey(),
+		Role:           w.requestInfo.RequestRole(),
+		BearerToken:    w.requestInfo.Bearer(),
+	})
+}
+
+func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
+	return &wrappedRangeStream{
+		GetObjectRangeStream: rangeStream,
+		requestInfo:          reqInfo,
+	}
+}
+
+// wrappedSearchStream propagates RequestContext into SearchStream's context.
+// This allows retrieving already-calculated, immutable request-specific values in the next handler invocation.
+type wrappedSearchStream struct {
+	object.SearchStream
+
+	requestInfo RequestInfo
+}
+
+func (w *wrappedSearchStream) Context() context.Context {
+	return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
+		Namespace:      w.requestInfo.ContainerNamespace(),
+		ContainerOwner: w.requestInfo.ContainerOwner(),
+		SenderKey:      w.requestInfo.SenderKey(),
+		Role:           w.requestInfo.RequestRole(),
+		BearerToken:    w.requestInfo.Bearer(),
+	})
+}
+
+func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
+	return &wrappedSearchStream{
+		SearchStream: searchStream,
+		requestInfo:  reqInfo,
+	}
+}
+
+// Get implements ServiceServer interface, makes ACL checks and calls
+// next Get method in the ServiceServer pipeline.
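Every handler from Get down repeats one prologue: parse the container (and, where present, object) ID, extract the original session and bearer tokens, check that the session token actually relates to the addressed container and object, then classify the sender via findRequestInfo. The relation check is the subtle step; a plausible reduction of its rule to invented types (assertSessionRelation itself is not shown in this patch, so this is an assumption about its intent):

	package sketch

	// scope stands in for the container/object binding carried by a session
	// token (sessionSDK.Object in the real code).
	type scope struct {
		cnr string
		obj string // empty: the token covers the whole container
	}

	// relates sketches the assumed rule: the token must be bound to the
	// container, and to the object when both sides name one.
	func relates(s scope, cnr, obj string) bool {
		if s.cnr != cnr {
			return false
		}
		return s.obj == "" || obj == "" || s.obj == obj
	}
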
+func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet) + if err != nil { + return err + } + + reqInfo.obj = obj + + return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) +} + +func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) { + streamer, err := b.next.Put(ctx) + + return putStreamBasicChecker{ + source: &b, + next: streamer, + }, err +} + +func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) { + streamer, err := b.next.Patch(ctx) + + return &patchStreamBasicChecker{ + source: &b, + next: streamer, + }, err +} + +func (b Service) Head( + ctx context.Context, + request *objectV2.HeadRequest, +) (*objectV2.HeadResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return nil, err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return nil, err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return b.next.Head(requestContext(ctx, reqInfo), request) +} + +func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error { + id, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, id, nil) + if err != nil { + return err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch) + if err != nil { + return err + } + + return b.next.Search(request, newWrappedSearchStream(stream, reqInfo)) +} + +func (b Service) Delete( + ctx context.Context, + request *objectV2.DeleteRequest, +) (*objectV2.DeleteResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return nil, err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err 
!= nil { + return nil, err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return b.next.Delete(requestContext(ctx, reqInfo), request) +} + +func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange) + if err != nil { + return err + } + + reqInfo.obj = obj + + return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo)) +} + +func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { + return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{ + Namespace: reqInfo.ContainerNamespace(), + ContainerOwner: reqInfo.ContainerOwner(), + SenderKey: reqInfo.SenderKey(), + Role: reqInfo.RequestRole(), + BearerToken: reqInfo.Bearer(), + }) +} + +func (b Service) GetRangeHash( + ctx context.Context, + request *objectV2.GetRangeHashRequest, +) (*objectV2.GetRangeHashResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return nil, err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return nil, err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) +} + +func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID() + if idV2 == nil { + return nil, errors.New("missing object owner") + } + + var idOwner user.ID + + err = idOwner.ReadFromV2(*idV2) + if err != nil { + return nil, fmt.Errorf("invalid object owner: %w", err) + } + + obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID()) + if err != nil { + return nil, err + } + + var sTok *sessionSDK.Object + sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) + if err != nil { + return nil, err + } + + bTok, err := 
originalBearerToken(request.GetMetaHeader())
+	if err != nil {
+		return nil, err
+	}
+
+	req := MetaWithToken{
+		vheader: request.GetVerificationHeader(),
+		token:   sTok,
+		bearer:  bTok,
+		src:     request,
+	}
+
+	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
+	if err != nil {
+		return nil, err
+	}
+
+	reqInfo.obj = obj
+
+	return b.next.PutSingle(requestContext(ctx, reqInfo), request)
+}
+
+func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
+	body := request.GetBody()
+	if body == nil {
+		return errEmptyBody
+	}
+
+	part := body.GetObjectPart()
+	if part, ok := part.(*objectV2.PutObjectPartInit); ok {
+		cnr, err := getContainerIDFromRequest(request)
+		if err != nil {
+			return err
+		}
+
+		idV2 := part.GetHeader().GetOwnerID()
+		if idV2 == nil {
+			return errors.New("missing object owner")
+		}
+
+		var idOwner user.ID
+
+		err = idOwner.ReadFromV2(*idV2)
+		if err != nil {
+			return fmt.Errorf("invalid object owner: %w", err)
+		}
+
+		objV2 := part.GetObjectID()
+		var obj *oid.ID
+
+		if objV2 != nil {
+			obj = new(oid.ID)
+
+			err = obj.ReadFromV2(*objV2)
+			if err != nil {
+				return err
+			}
+		}
+
+		var sTok *sessionSDK.Object
+		sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+		if err != nil {
+			return err
+		}
+
+		bTok, err := originalBearerToken(request.GetMetaHeader())
+		if err != nil {
+			return err
+		}
+
+		req := MetaWithToken{
+			vheader: request.GetVerificationHeader(),
+			token:   sTok,
+			bearer:  bTok,
+			src:     request,
+		}
+
+		reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
+		if err != nil {
+			return err
+		}
+
+		reqInfo.obj = obj
+
+		ctx = requestContext(ctx, reqInfo)
+	}
+
+	return p.next.Send(ctx, request)
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+	var sTok *sessionSDK.Object
+
+	if tokV2 != nil {
+		sTok = new(sessionSDK.Object)
+
+		err := sTok.ReadFromV2(*tokV2)
+		if err != nil {
+			return nil, fmt.Errorf("invalid session token: %w", err)
+		}
+
+		if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+			// if the session relates to the object's removal, we don't check
+			// the relation of the tombstone to the session here, since the
+			// user can't predict the tombstone's ID.
+			err = assertSessionRelation(*sTok, cnr, nil)
+		} else {
+			err = assertSessionRelation(*sTok, cnr, obj)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return sTok, nil
+}
+
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+	return p.next.CloseAndRecv(ctx)
+}
+
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+	body := request.GetBody()
+	if body == nil {
+		return errEmptyBody
+	}
+
+	if !p.nonFirstSend {
+		p.nonFirstSend = true
+
+		cnr, err := getContainerIDFromRequest(request)
+		if err != nil {
+			return err
+		}
+
+		objV2 := request.GetBody().GetAddress().GetObjectID()
+		if objV2 == nil {
+			return errors.New("missing oid")
+		}
+		obj := new(oid.ID)
+		err = obj.ReadFromV2(*objV2)
+		if err != nil {
+			return err
+		}
+
+		var sTok *sessionSDK.Object
+		sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+		if err != nil {
+			return err
+		}
+
+		bTok, err := originalBearerToken(request.GetMetaHeader())
+		if err != nil {
+			return err
+		}
+
+		req := MetaWithToken{
+			vheader: request.GetVerificationHeader(),
+			token:   sTok,
+			bearer:  bTok,
+			src:     request,
+		}
+
+		reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr)
+		if err != nil {
+			return err
+		}
+
+		reqInfo.obj = obj
+
+		ctx = requestContext(ctx, reqInfo)
+	}
+
+	return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+	return p.next.CloseAndRecv(ctx)
+}
+
+func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
+	cnr, err := b.containers.Get(idCnr) // fetch actual container
+	if err != nil {
+		return info, err
+	}
+
+	if req.token != nil {
+		currentEpoch, err := b.nm.Epoch()
+		if err != nil {
+			return info, errors.New("can't fetch current epoch")
+		}
+		if req.token.ExpiredAt(currentEpoch) {
+			return info, new(apistatus.SessionTokenExpired)
+		}
+		if req.token.InvalidAt(currentEpoch) {
+			return info, fmt.Errorf("%s: token is invalid at epoch %d",
+				invalidRequestMessage, currentEpoch)
+		}
+
+		if !assertVerb(*req.token, op) {
+			return info, errInvalidVerb
+		}
+	}
+
+	// find request role and key
+	ownerID, ownerKey, err := req.RequestOwner()
+	if err != nil {
+		return info, err
+	}
+	res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
+	if err != nil {
+		return info, err
+	}
+
+	info.basicACL = cnr.Value.BasicACL()
+	info.requestRole = res.Role
+	info.operation = op
+	info.cnrOwner = cnr.Value.Owner()
+	info.idCnr = idCnr
+
+	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+	if hasNamespace {
+		info.cnrNamespace = cnrNamespace
+	}
+
+	// it is assumed that the key is valid at this point,
+	// otherwise the request would not have passed validation
+	info.senderKey = res.Key
+
+	// add the bearer token if it is present in the request
+	info.bearer = req.bearer
+
+	info.srcRequest = req.src
+
+	return info, nil
+}
+
+// findRequestInfoWithoutACLOperationAssert is findRequestInfo without the session token verb assertion.
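// A sketch of why this separate entry point exists (not part of the patch):
// assertVerb maps acl.Op values to the session verbs they accept, and the
// basic-ACL op set has no Op meaning "patch", so Patch requests have nothing
// to pass in and only validate the token lifetime.
var tok sessionSDK.Object
tok.ForVerb(sessionSDK.VerbObjectPatch)

// A patch-scoped token still satisfies several ops (OpObjectPut, OpObjectHead
// and OpObjectRange all list VerbObjectPatch in assertVerb):
_ = assertVerb(tok, acl.OpObjectPut) // true
_ = assertVerb(tok, acl.OpObjectGet) // false

// The variant below is findRequestInfo minus that assertVerb call: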
+func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
+	cnr, err := b.containers.Get(idCnr) // fetch actual container
+	if err != nil {
+		return info, err
+	}
+
+	if req.token != nil {
+		currentEpoch, err := b.nm.Epoch()
+		if err != nil {
+			return info, errors.New("can't fetch current epoch")
+		}
+		if req.token.ExpiredAt(currentEpoch) {
+			return info, new(apistatus.SessionTokenExpired)
+		}
+		if req.token.InvalidAt(currentEpoch) {
+			return info, fmt.Errorf("%s: token is invalid at epoch %d",
+				invalidRequestMessage, currentEpoch)
+		}
+	}
+
+	// find request role and key
+	ownerID, ownerKey, err := req.RequestOwner()
+	if err != nil {
+		return info, err
+	}
+	res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
+	if err != nil {
+		return info, err
+	}
+
+	info.basicACL = cnr.Value.BasicACL()
+	info.requestRole = res.Role
+	info.cnrOwner = cnr.Value.Owner()
+	info.idCnr = idCnr
+
+	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+	if hasNamespace {
+		info.cnrNamespace = cnrNamespace
+	}
+
+	// it is assumed that the key is valid at this point,
+	// otherwise the request would not have passed validation
+	info.senderKey = res.Key
+
+	// add the bearer token if it is present in the request
+	info.bearer = req.bearer
+
+	info.srcRequest = req.src
+
+	return info, nil
+}
diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go
new file mode 100644
index 000000000..b03261b90
--- /dev/null
+++ b/pkg/services/object/acl/v2/types.go
@@ -0,0 +1,9 @@
+package v2
+
+// InnerRingFetcher is an interface that must provide
+// Inner Ring information.
+type InnerRingFetcher interface {
+	// InnerRingKeys must return the list of public keys of
+	// the actual inner ring.
+ InnerRingKeys() ([][]byte, error) +} diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/acl/v2/util.go similarity index 58% rename from pkg/services/object/ape/util.go rename to pkg/services/object/acl/v2/util.go index 5cd2caa50..e02f70771 100644 --- a/pkg/services/object/ape/util.go +++ b/pkg/services/object/acl/v2/util.go @@ -1,4 +1,4 @@ -package ape +package v2 import ( "crypto/ecdsa" @@ -6,34 +6,57 @@ import ( "errors" "fmt" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) -func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return +var errMissingContainerID = errors.New("missing container ID") + +func getContainerIDFromRequest(req any) (cid.ID, error) { + var idV2 *refsV2.ContainerID + var id cid.ID + + switch v := req.(type) { + case *objectV2.GetRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.PutRequest: + part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit) + if !ok { + return cid.ID{}, errors.New("can't get container ID in chunk") } - } else { - err = errMissingContainerID - return + + idV2 = part.GetHeader().GetContainerID() + case *objectV2.HeadRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.SearchRequest: + idV2 = v.GetBody().GetContainerID() + case *objectV2.DeleteRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.GetRangeRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.GetRangeHashRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.PutSingleRequest: + idV2 = v.GetBody().GetObject().GetHeader().GetContainerID() + case *objectV2.PatchRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + default: + return cid.ID{}, errors.New("unknown request type") } - if objV2 != nil { - objID = new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } + if idV2 == nil { + return cid.ID{}, errMissingContainerID } - return + + return id, id.ReadFromV2(*idV2) } // originalBearerToken goes down to original request meta header and fetches @@ -52,6 +75,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er return &tok, tok.ReadFromV2(*tokV2) } +// originalSessionToken goes down to original request meta header and fetches +// session token from there. 
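// Relay hops wrap the sender's meta header via SetOrigin, so the token that
// matters is the innermost one. A sketch of the shape being unwrapped (tokV2
// is a hypothetical *sessionV2.Token; not part of the patch):
inner := new(sessionV2.RequestMetaHeader)
inner.SetSessionToken(tokV2) // token attached by the original sender
outer := new(sessionV2.RequestMetaHeader)
outer.SetOrigin(inner) // a relay hop wraps the original header
tok, err := originalSessionToken(outer) // decodes the token set on inner

// The origin walk itself follows: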
+func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) { + for header.GetOrigin() != nil { + header = header.GetOrigin() + } + + tokV2 := header.GetSessionToken() + if tokV2 == nil { + return nil, nil + } + + var tok sessionSDK.Object + + err := tok.ReadFromV2(*tokV2) + if err != nil { + return nil, fmt.Errorf("invalid session token: %w", err) + } + + return &tok, nil +} + +// getObjectIDFromRequestBody decodes oid.ID from the common interface of the +// object reference's holders. Returns an error if object ID is missing in the request. +func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) { + idV2 := body.GetAddress().GetObjectID() + return getObjectIDFromRefObjectID(idV2) +} + +func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) { + if idV2 == nil { + return nil, errors.New("missing object ID") + } + + var id oid.ID + + err := id.ReadFromV2(*idV2) + if err != nil { + return nil, err + } + + return &id, nil +} + func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { // 1. First check signature of session token. if !token.VerifySignature() { @@ -105,16 +172,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -// assertVerb checks that token verb corresponds to the method. -func assertVerb(tok sessionSDK.Object, method string) bool { - switch method { - case nativeschema.MethodPutObject: +// assertVerb checks that token verb corresponds to op. +func assertVerb(tok sessionSDK.Object, op acl.Op) bool { + switch op { + case acl.OpObjectPut: return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) - case nativeschema.MethodDeleteObject: + case acl.OpObjectDelete: return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case nativeschema.MethodGetObject: + case acl.OpObjectGet: return tok.AssertVerb(sessionSDK.VerbObjectGet) - case nativeschema.MethodHeadObject: + case acl.OpObjectHead: return tok.AssertVerb( sessionSDK.VerbObjectHead, sessionSDK.VerbObjectGet, @@ -123,15 +190,14 @@ func assertVerb(tok sessionSDK.Object, method string) bool { sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch, ) - case nativeschema.MethodSearchObject: + case acl.OpObjectSearch: return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case nativeschema.MethodRangeObject: + case acl.OpObjectRange: return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) - case nativeschema.MethodHashObject: + case acl.OpObjectHash: return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) - case nativeschema.MethodPatchObject: - return tok.AssertVerb(sessionSDK.VerbObjectPatch) } + return false } @@ -155,15 +221,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error return nil } - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go new file mode 100644 index 000000000..4b19cecfe --- /dev/null +++ b/pkg/services/object/acl/v2/util_test.go @@ -0,0 +1,136 @@ +package v2 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "testing" + + 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" + aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" + "github.com/stretchr/testify/require" +) + +func TestOriginalTokens(t *testing.T) { + sToken := sessiontest.ObjectSigned() + bToken := bearertest.Token() + + pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, bToken.Sign(*pk)) + + var bTokenV2 acl.BearerToken + bToken.WriteToV2(&bTokenV2) + // This line is needed because SDK uses some custom format for + // reserved filters, so `cid.ID` is not converted to string immediately. + require.NoError(t, bToken.ReadFromV2(bTokenV2)) + + var sTokenV2 session.Token + sToken.WriteToV2(&sTokenV2) + + for i := range 10 { + metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) + res, err := originalSessionToken(metaHeaders) + require.NoError(t, err) + require.Equal(t, sToken, res, i) + + bTok, err := originalBearerToken(metaHeaders) + require.NoError(t, err) + require.Equal(t, &bToken, bTok, i) + } +} + +func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader { + metaHeader := new(session.RequestMetaHeader) + metaHeader.SetBearerToken(b) + metaHeader.SetSessionToken(s) + + for range depth { + link := metaHeader + metaHeader = new(session.RequestMetaHeader) + metaHeader.SetOrigin(link) + } + + return metaHeader +} + +func TestIsVerbCompatible(t *testing.T) { + // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28 + table := map[aclsdk.Op][]sessionSDK.ObjectVerb{ + aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete}, + aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete}, + aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet}, + aclsdk.OpObjectHead: { + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + }, + aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash}, + aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash}, + aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, + } + + verbs := []sessionSDK.ObjectVerb{ + sessionSDK.VerbObjectPut, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectSearch, + } + + var tok sessionSDK.Object + + for op, list := range table { + for _, verb := range verbs { + var contains bool + for _, v := range list { + if v == verb { + contains = true + break + } + } + + tok.ForVerb(verb) + + require.Equal(t, contains, assertVerb(tok, op), + "%v in token, %s executing", verb, op) + } + } +} + +func TestAssertSessionRelation(t *testing.T) { + var tok sessionSDK.Object + cnr := cidtest.ID() + cnrOther := cidtest.ID() + obj := oidtest.ID() + objOther := oidtest.ID() + + // make sure ids differ, otherwise test won't work correctly + require.False(t, cnrOther.Equals(cnr)) + require.False(t, objOther.Equals(obj)) + + // bind session to the container (required) + tok.BindContainer(cnr) + + // test 
container-global session + require.NoError(t, assertSessionRelation(tok, cnr, nil)) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnrOther, nil)) + require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) + + // limit the session to the particular object + tok.LimitByObjects(obj) + + // test fixed object session (here obj arg must be non-nil everywhere) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnr, &objOther)) +} diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index bb6067a37..4a3b5ba5e 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -64,9 +64,6 @@ type Prm struct { // An encoded container's owner user ID. ContainerOwner user.ID - // Attributes defined for the container. - ContainerAttributes map[string]string - // The request's bearer token. It is used in order to check APE overrides with the token. BearerToken *bearer.Token @@ -79,10 +76,9 @@ var errMissingOID = errors.New("object ID is not set") // CheckAPE prepares an APE-request and checks if it is permitted by policies. func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { // APE check is ignored for some inter-node requests. - switch prm.Role { - case nativeschema.PropertyValueContainerRoleContainer: + if prm.Role == nativeschema.PropertyValueContainerRoleContainer { return nil - case nativeschema.PropertyValueContainerRoleIR: + } else if prm.Role == nativeschema.PropertyValueContainerRoleIR { switch prm.Method { case nativeschema.MethodGetObject, nativeschema.MethodHeadObject, @@ -103,7 +99,7 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { return err } - return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{ + return c.checkerCore.CheckAPE(checkercore.CheckPrm{ Request: r, PublicKey: pub, Namespace: prm.Namespace, diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index 97eb2b2d7..e03b5750c 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { return pk.GetScriptHash() } -func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160 return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -619,21 +619,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) + return s.GetNetMapByEpoch(s.currentEpoch - diff) } -func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) 
(*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { +func (s *netmapStub) Epoch() (uint64, error) { return s.currentEpoch, nil } @@ -641,14 +641,14 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { return nil, nil } diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go index 82e660a7f..6e458b384 100644 --- a/pkg/services/object/ape/errors.go +++ b/pkg/services/object/ape/errors.go @@ -7,21 +7,6 @@ import ( apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) -var ( - errMissingContainerID = malformedRequestError("missing container ID") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) - -func malformedRequestError(reason string) error { - invalidArgErr := &apistatus.InvalidArgument{} - invalidArgErr.SetMessage(reason) - return invalidArgErr -} - func toStatusErr(err error) error { var chRouterErr *checkercore.ChainRouterError if !errors.As(err, &chRouterErr) { diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go deleted file mode 100644 index 102985aa6..000000000 --- a/pkg/services/object/ape/metadata.go +++ /dev/null @@ -1,179 +0,0 @@ -package ape - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type Metadata struct { - Container cid.ID - Object *oid.ID - MetaHeader *session.RequestMetaHeader - VerificationHeader *session.RequestVerificationHeader - SessionToken *sessionSDK.Object - BearerToken *bearer.Token -} - -func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) { - if m.VerificationHeader == nil { - return nil, nil, errEmptyVerificationHeader - } - - if m.BearerToken != nil && 
m.BearerToken.Impersonate() { - return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes()) - } - - // if session token is presented, use it as truth source - if m.SessionToken != nil { - // verify signature of session token - return ownerFromToken(m.SessionToken) - } - - // otherwise get original body signature - bodySignature := originalBodySignature(m.VerificationHeader) - if bodySignature == nil { - return nil, nil, errEmptyBodySig - } - - return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) -} - -// RequestInfo contains request information extracted by request metadata. -type RequestInfo struct { - // Role defines under which role this request is executed. - // It must be represented only as a constant represented in native schema. - Role string - - ContainerOwner user.ID - - ContainerAttributes map[string]string - - // Namespace defines to which namespace a container is belonged. - Namespace string - - // HEX-encoded sender key. - SenderKey string -} - -type RequestInfoExtractor interface { - GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error) -} - -type extractor struct { - containers container.Source - - nm netmap.Source - - classifier objectCore.SenderClassifier -} - -func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor { - return &extractor{ - containers: containers, - nm: nm, - classifier: objectCore.NewSenderClassifier(irFetcher, nm, log), - } -} - -func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error { - currentEpoch, err := e.nm.Epoch(ctx) - if err != nil { - return errors.New("can't fetch current epoch") - } - if sessionToken.ExpiredAt(currentEpoch) { - return new(apistatus.SessionTokenExpired) - } - if sessionToken.InvalidAt(currentEpoch) { - return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch) - } - if !assertVerb(*sessionToken, method) { - return errInvalidVerb - } - return nil -} - -func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) { - cnr, err := e.containers.Get(ctx, m.Container) - if err != nil { - return ri, err - } - - if m.SessionToken != nil { - if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil { - return ri, err - } - } - - ownerID, ownerKey, err := m.RequestOwner() - if err != nil { - return ri, err - } - res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value) - if err != nil { - return ri, err - } - - ri.Role = nativeSchemaRole(res.Role) - ri.ContainerOwner = cnr.Value.Owner() - - ri.ContainerAttributes = map[string]string{} - for key, val := range cnr.Value.Attributes() { - ri.ContainerAttributes[key] = val - } - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - ri.Namespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - ri.SenderKey = hex.EncodeToString(res.Key) - - return ri, nil -} - -func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { - var sTok *sessionSDK.Object - - if tokV2 != nil { - sTok = new(sessionSDK.Object) - - err := sTok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { - // if session relates to object's removal, we don't 
check - // relation of the tombstone to the session here since user - // can't predict tomb's ID. - err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index 39dd7f476..cb9bbf1b8 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -57,16 +57,11 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string { } // objectProperties collects object properties from address parameters and a header if it is passed. -func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string { +func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string { objectProps := map[string]string{ nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(), } - for attrName, attrValue := range cnrAttrs { - prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName) - objectProps[prop] = attrValue - } - objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString() if oid != nil { @@ -145,7 +140,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re reqProps[xheadKey] = xhead.GetValue() } - reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm) + reqProps, err = c.fillWithUserClaimTags(reqProps, prm) if err != nil { return defaultRequest, err } @@ -160,7 +155,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re prm.Method, aperequest.NewResource( resourceName(prm.Container, prm.Object, prm.Namespace), - objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header), + objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header), ), reqProps, ), nil @@ -182,7 +177,7 @@ func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, heade return nil, fmt.Errorf("EC parent object ID format error: %w", err) } // only container node have access to collect parent object - contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container) + contNode, err := c.currentNodeIsContainerNode(prm.Container) if err != nil { return nil, fmt.Errorf("check container node status: %w", err) } @@ -205,13 +200,13 @@ func isLogicalError(err error) bool { return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound) } -func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) { - cnr, err := c.cnrSource.Get(ctx, cnrID) +func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { + cnr, err := c.cnrSource.Get(cnrID) if err != nil { return false, err } - nm, err := netmap.GetLatestNetworkMap(ctx, c.nm) + nm, err := netmap.GetLatestNetworkMap(c.nm) if err != nil { return false, err } @@ -225,7 +220,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid. return true, nil } - nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm) + nm, err = netmap.GetPreviousNetworkMap(c.nm) if err != nil { return false, err } @@ -234,7 +229,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid. } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. 
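// A condensed view of the helper's effect, assuming the merge is a plain map
// copy (aperequest.FormFrostfsIDRequestProperties and the surrounding names
// are from the hunk below; the loop is illustrative):
if reqProps == nil {
	reqProps = make(map[string]string)
}
props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk)
if err != nil {
	return reqProps, err
}
for k, v := range props {
	reqProps[k] = v // claim-derived properties join the actor and role keys
}

// The hunk below drops the ctx parameter from this helper: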
-func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) { +func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } @@ -242,7 +237,7 @@ func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[st if err != nil { return nil, err } - props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go index fcf7c4c40..787785b60 100644 --- a/pkg/services/object/ape/request_test.go +++ b/pkg/services/object/ape/request_test.go @@ -7,7 +7,6 @@ import ( "testing" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" - cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -20,20 +19,11 @@ import ( ) const ( - testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y" + testOwnerID = "FPPtmAi9TCX329" incomingIP = "192.92.33.1" - - testSysAttrName = "unittest" - - testSysAttrZone = "eggplant" ) -var containerAttrs = map[string]string{ - cnrV2.SysAttributeName: testSysAttrName, - cnrV2.SysAttributeZone: testSysAttrZone, -} - func ctxWithPeerInfo() context.Context { return peer.NewContext(context.Background(), &peer.Peer{ Addr: &net.TCPAddr{ @@ -115,7 +105,7 @@ func TestObjectProperties(t *testing.T) { var testCnrOwner user.ID require.NoError(t, testCnrOwner.DecodeString(testOwnerID)) - props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader()) + props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader()) require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID]) require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID]) @@ -134,8 +124,6 @@ func TestObjectProperties(t *testing.T) { require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType]) require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash]) require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash]) - require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)]) - require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)]) for _, attr := range test.header.attributes { require.Equal(t, attr.val, props[attr.key]) @@ -257,10 +245,6 @@ func TestNewAPERequest(t *testing.T) { Role: role, SenderKey: senderKey, ContainerOwner: testCnrOwner, - ContainerAttributes: map[string]string{ - cnrV2.SysAttributeZone: testSysAttrZone, - cnrV2.SysAttributeName: testSysAttrName, - }, } headerSource := newHeaderProviderMock() @@ -293,7 +277,7 @@ func TestNewAPERequest(t *testing.T) { method, aperequest.NewResource( resourceName(cnr, obj, prm.Namespace), - objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header { + objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header 
{ if headerObjSDK != nil { return headerObjSDK.ToV2().GetHeader() } diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index 5e04843f3..c6d152e0f 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -2,6 +2,9 @@ package ape import ( "context" + "encoding/hex" + "errors" + "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" @@ -9,18 +12,19 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" ) +var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") + type Service struct { apeChecker Checker - extractor RequestInfoExtractor - next objectSvc.ServiceServer } @@ -60,10 +64,9 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { +func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service { return &Service{ apeChecker: apeChecker, - extractor: extractor, next: next, } } @@ -73,9 +76,15 @@ type getStreamBasicChecker struct { apeChecker Checker - metadata Metadata + namespace string - reqInfo RequestInfo + senderKey []byte + + containerOwner user.ID + + role string + + bearerToken *bearer.Token } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { @@ -86,17 +95,16 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.reqInfo.Namespace, - Container: cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodGetObject, - SenderKey: g.reqInfo.SenderKey, - ContainerOwner: g.reqInfo.ContainerOwner, - ContainerAttributes: g.reqInfo.ContainerAttributes, - Role: g.reqInfo.Role, - BearerToken: g.metadata.BearerToken, - XHeaders: resp.GetMetaHeader().GetXHeaders(), + Namespace: g.namespace, + Container: cnrID, + Object: objID, + Header: partInit.GetHeader(), + Method: nativeschema.MethodGetObject, + SenderKey: hex.EncodeToString(g.senderKey), + ContainerOwner: g.containerOwner, + Role: g.role, + BearerToken: g.bearerToken, + XHeaders: resp.GetMetaHeader().GetXHeaders(), } if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil { @@ -106,54 +114,64 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { return g.GetObjectStream.Send(resp) } +func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) { + untyped := ctx.Value(objectSvc.RequestContextKey) + if untyped == nil { + return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey) + } + rc, ok := untyped.(*objectSvc.RequestContext) + if !ok { + return nil, errFailedToCastToRequestContext + } + return rc, nil +} + func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - md, err := newMetadata(request, 
request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + reqCtx, err := requestContext(stream.Context()) if err != nil { - return err - } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) - if err != nil { - return err + return toStatusErr(err) } + return c.next.Get(request, &getStreamBasicChecker{ GetObjectStream: stream, apeChecker: c.apeChecker, - metadata: md, - reqInfo: reqInfo, + namespace: reqCtx.Namespace, + senderKey: reqCtx.SenderKey, + containerOwner: reqCtx.ContainerOwner, + role: nativeSchemaRole(reqCtx.Role), + bearerToken: reqCtx.BearerToken, }) } type putStreamBasicChecker struct { apeChecker Checker - extractor RequestInfoExtractor - next objectSvc.PutObjectStream } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) + reqCtx, err := requestContext(ctx) if err != nil { - return err + return toStatusErr(err) } - reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) + + cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) if err != nil { - return err + return toStatusErr(err) } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Header: partInit.GetHeader(), - Method: nativeschema.MethodPutObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Header: partInit.GetHeader(), + Method: nativeschema.MethodPutObject, + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + Role: nativeSchemaRole(reqCtx.Role), + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -173,7 +191,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { return &putStreamBasicChecker{ apeChecker: c.apeChecker, - extractor: c.extractor, next: streamer, }, err } @@ -181,8 +198,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { type patchStreamBasicChecker struct { apeChecker Checker - extractor RequestInfoExtractor - next objectSvc.PatchObjectStream nonFirstSend bool @@ -192,26 +207,26 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa if !p.nonFirstSend { p.nonFirstSend = true - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + reqCtx, err := requestContext(ctx) if err != nil { - return err + return toStatusErr(err) } - reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return err + return toStatusErr(err) } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodPatchObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: 
reqInfo.ContainerAttributes, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Method: nativeschema.MethodPatchObject, + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + Role: nativeSchemaRole(reqCtx.Role), + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -231,17 +246,17 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error return &patchStreamBasicChecker{ apeChecker: c.apeChecker, - extractor: c.extractor, next: streamer, }, err } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } @@ -255,7 +270,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj switch headerPart := resp.GetBody().GetHeaderPart().(type) { case *objectV2.ShortHeader: cidV2 := new(refs.ContainerID) - md.Container.WriteToV2(cidV2) + cnrID.WriteToV2(cidV2) header.SetContainerID(cidV2) header.SetVersion(headerPart.GetVersion()) header.SetCreationEpoch(headerPart.GetCreationEpoch()) @@ -271,17 +286,16 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Header: header, - Method: nativeschema.MethodHeadObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Header: header, + Method: nativeschema.MethodHeadObject, + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -290,25 +304,27 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - md, err := newMetadata(request, request.GetBody().GetContainerID(), nil) - if err != nil { - return err + var cnrID cid.ID + if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { + if err := cnrID.ReadFromV2(*cnrV2); err != nil { + return toStatusErr(err) + } } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject) + + reqCtx, err := requestContext(stream.Context()) if err != nil { - return err + return toStatusErr(err) } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Method: nativeschema.MethodSearchObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: 
reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Method: nativeschema.MethodSearchObject, + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -318,26 +334,26 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodDeleteObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Method: nativeschema.MethodDeleteObject, + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -352,26 +368,26 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return err + return toStatusErr(err) } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) + + reqCtx, err := requestContext(stream.Context()) if err != nil { - return err + return toStatusErr(err) } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodRangeObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Method: nativeschema.MethodRangeObject, + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -381,26 +397,26 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx 
context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodHashObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Method: nativeschema.MethodHashObject, + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -415,27 +431,27 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Header: request.GetBody().GetObject().GetHeader(), - Method: nativeschema.MethodPutObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - ContainerAttributes: reqInfo.ContainerAttributes, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, + Header: request.GetBody().GetObject().GetHeader(), + Method: nativeschema.MethodPutObject, + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: request.GetMetaHeader().GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -445,36 +461,18 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ return c.next.PutSingle(ctx, request) } -type request interface { - GetMetaHeader() *session.RequestMetaHeader - GetVerificationHeader() *session.RequestVerificationHeader -} - -func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin +func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { + if cidV2 != nil { + if err = cnrID.ReadFromV2(*cidV2); err != nil { + return + } } - 
cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2) - if err != nil { - return - } - session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) - if err != nil { - return - } - bearer, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return - } - - md = Metadata{ - Container: cnrID, - Object: objID, - VerificationHeader: request.GetVerificationHeader(), - SessionToken: session, - BearerToken: bearer, + if objV2 != nil { + objID = new(oid.ID) + if err = objID.ReadFromV2(*objV2); err != nil { + return + } } return } diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go index 97dbfa658..46e55360d 100644 --- a/pkg/services/object/ape/types.go +++ b/pkg/services/object/ape/types.go @@ -7,11 +7,3 @@ import "context" type Checker interface { CheckAPE(context.Context, Prm) error } - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. - InnerRingKeys(ctx context.Context) ([][]byte, error) -} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go deleted file mode 100644 index 916bce427..000000000 --- a/pkg/services/object/ape/util_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package ape - -import ( - "slices" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/stretchr/testify/require" -) - -func TestIsVerbCompatible(t *testing.T) { - table := map[string][]sessionSDK.ObjectVerb{ - nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, - nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, - nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, - nativeschema.MethodHeadObject: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectPatch, - }, - nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch}, - nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, - nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - sessionSDK.VerbObjectPatch, - } - - var tok sessionSDK.Object - - for op, list := range table { - for _, verb := range verbs { - contains := slices.Contains(list, verb) - - tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - 
tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index f8ee089fe..dde9f8fc0 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -163,7 +163,7 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error if err != nil { a.failed = true } - if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) @@ -224,7 +224,7 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e if err != nil { a.failed = true } - if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index f2bd907db..9e0f49297 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -1,7 +1,6 @@ package target import ( - "context" "errors" "fmt" @@ -14,20 +13,20 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) -func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { // prepare needed put parameters - if err := preparePrm(ctx, &prm); err != nil { + if err := preparePrm(&prm); err != nil { return nil, fmt.Errorf("could not prepare put parameters: %w", err) } if prm.Header.Signature() != nil { - return newUntrustedTarget(ctx, &prm) + return newUntrustedTarget(&prm) } - return newTrustedTarget(ctx, &prm) + return newTrustedTarget(&prm) } -func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) +func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize() if maxPayloadSz == 0 { return nil, errors.New("could not obtain max object size parameter") } @@ -49,9 +48,9 @@ func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transfor }, nil } -func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { prm.Relay = nil // do not relay request without signature - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) + maxPayloadSz 
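// --- illustrative aside, not part of the patch ---
// The deleted util_test.go above pins down assertSessionRelation: a session
// bound to a container covers any object in it, while LimitByObjects narrows
// it to specific objects. Sketch of that check, assuming the SDK's
// AssertContainer/AssertObject helpers; sessionCoversAddress is hypothetical.
package sketch

import (
	"errors"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)

func sessionCoversAddress(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error {
	if !tok.AssertContainer(cnr) {
		return errors.New("session relates to another container")
	}
	// A nil object is fine for a container-global session; an object-limited
	// session must list the requested object explicitly.
	if obj != nil && !tok.AssertObject(*obj) {
		return errors.New("session relates to another object")
	}
	return nil
}
// --- end aside ---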
:= prm.Config.MaxSizeSrc.MaxObjectSize() if maxPayloadSz == 0 { return nil, errors.New("could not obtain max object size parameter") } @@ -89,8 +88,10 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme if !ownerObj.Equals(ownerSession) { return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) } - } else if !ownerObj.Equals(sessionInfo.Owner) { - return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) + } else { + if !ownerObj.Equals(sessionInfo.Owner) { + return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) + } } if prm.SignRequestPrivateKey == nil { @@ -110,11 +111,11 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme }, nil } -func preparePrm(ctx context.Context, prm *objectwriter.Params) error { +func preparePrm(prm *objectwriter.Params) error { var err error // get latest network map - nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource) + nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource) if err != nil { return fmt.Errorf("could not get latest network map: %w", err) } @@ -125,7 +126,7 @@ func preparePrm(ctx context.Context, prm *objectwriter.Params) error { } // get container to store the object - cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr) + cnrInfo, err := prm.Config.ContainerSource.Get(idCnr) if err != nil { return fmt.Errorf("could not get container by ID: %w", err) } diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 6593d3ca0..3b68efab4 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { } func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { - traverser, err := placement.NewTraverser(ctx, n.Opts...) + traverser, err := placement.NewTraverser(n.Traversal.Opts...) if err != nil { return fmt.Errorf("could not create object placement traverser: %w", err) } @@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, } // perform additional container broadcast if needed - if n.submitPrimaryPlacementFinish() { + if n.Traversal.submitPrimaryPlacementFinish() { err := n.ForEachNode(ctx, f) if err != nil { n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) @@ -79,11 +79,11 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. continue } - isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey()) + workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey()) item := new(bool) wg.Add(1) - go func() { + if err := workerPool.Submit(func() { defer wg.Done() err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr}) @@ -95,13 +95,17 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. traverser.SubmitSuccess() *item = true - }() + }); err != nil { + wg.Done() + svcutil.LogWorkerPoolError(ctx, n.cfg.Logger, "PUT", err) + return true + } // Mark the container node as processed in order to exclude it // in subsequent container broadcast. Note that we don't // process this node during broadcast if primary placement // on it failed. 
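// --- illustrative aside, not part of the patch ---
// ForEachNode above replaces bare goroutines with workerPool.Submit. The
// invariant to keep is that wg.Done runs exactly once per wg.Add, even when
// Submit rejects the task (e.g. a saturated non-blocking pool); otherwise
// wg.Wait deadlocks. Stand-alone sketch; WorkerPool stands in for the
// pkg/util interface used in the hunk.
package sketch

import (
	"fmt"
	"sync"
)

type WorkerPool interface {
	Submit(func()) error
}

func fanOut(pool WorkerPool, tasks []func()) error {
	var wg sync.WaitGroup
	for _, task := range tasks {
		task := task
		wg.Add(1)
		if err := pool.Submit(func() {
			defer wg.Done()
			task()
		}); err != nil {
			wg.Done() // the closure never ran; balance the counter ourselves
			return fmt.Errorf("submit task: %w", err)
		}
	}
	wg.Wait()
	return nil
}
// --- end aside ---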
- n.submitProcessed(addr, item) + n.Traversal.submitProcessed(addr, item) } wg.Wait() diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go index fff58aca7..f7486eae7 100644 --- a/pkg/services/object/common/writer/distributed.go +++ b/pkg/services/object/common/writer/distributed.go @@ -95,10 +95,6 @@ func (x errIncompletePut) Error() string { return commonMsg } -func (x errIncompletePut) Unwrap() error { - return x.singleErr -} - // WriteObject implements the transformer.ObjectWriter interface. func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { t.obj = obj diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 26a53e315..94bcf6a32 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -85,7 +85,7 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error } func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) { - currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx) + currentNodeIsContainerNode, err := e.currentNodeIsContainerNode() if err != nil { return false, false, err } @@ -108,8 +108,8 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O return true, currentNodeIsContainerNode, nil } -func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) { - t, err := placement.NewTraverser(ctx, e.PlacementOpts...) +func (e *ECWriter) currentNodeIsContainerNode() (bool, error) { + t, err := placement.NewTraverser(e.PlacementOpts...) if err != nil { return false, err } @@ -128,7 +128,7 @@ func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) } func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error { - t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) if err != nil { return err } @@ -149,7 +149,17 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } - err = e.Relay(ctx, info, c) + completed := make(chan interface{}) + if poolErr := e.Config.RemotePool.Submit(func() { + defer close(completed) + err = e.Relay(ctx, info, c) + }); poolErr != nil { + close(completed) + svcutil.LogWorkerPoolError(ctx, e.Config.Logger, "PUT", poolErr) + return poolErr + } + <-completed + if err == nil { return nil } @@ -170,7 +180,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error return e.writePartLocal(ctx, obj) } - t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) + t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) if err != nil { return err } @@ -207,7 +217,7 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er } partsProcessed := make([]atomic.Bool, len(parts)) objID, _ := obj.ID() - t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) 
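// --- illustrative aside, not part of the patch ---
// relayToContainerNode above keeps its synchronous error contract while
// running the relay on the remote pool: it blocks on a channel closed by the
// submitted closure. The same shape reduced to one helper; runOnPool is a
// hypothetical name.
package sketch

type workerPool interface{ Submit(func()) error }

// runOnPool executes f on the pool but returns f's error to the caller, just
// as a direct call would. A failed Submit surfaces as the pool's own error.
func runOnPool(pool workerPool, f func() error) error {
	var err error
	completed := make(chan struct{})
	if poolErr := pool.Submit(func() {
		defer close(completed)
		err = f()
	}); poolErr != nil {
		return poolErr
	}
	<-completed // err is safely visible after the channel close
	return err
}
// --- end aside ---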
if err != nil { return err } @@ -333,11 +343,21 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n } func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { + var err error localTarget := LocalTarget{ Storage: e.Config.LocalStore, Container: e.Container, } - return localTarget.WriteObject(ctx, obj, e.ObjectMeta) + completed := make(chan interface{}) + if poolErr := e.Config.LocalPool.Submit(func() { + defer close(completed) + err = localTarget.WriteObject(ctx, obj, e.ObjectMeta) + }); poolErr != nil { + close(completed) + return poolErr + } + <-completed + return err } func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { @@ -351,5 +371,15 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n nodeInfo: clientNodeInfo, } - return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) + var err error + completed := make(chan interface{}) + if poolErr := e.Config.RemotePool.Submit(func() { + defer close(completed) + err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) + }); poolErr != nil { + close(completed) + return poolErr + } + <-completed + return err } diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index d5eeddf21..8b2599e5f 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -7,7 +7,6 @@ import ( "crypto/sha256" "errors" "fmt" - "slices" "strconv" "testing" @@ -31,6 +30,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/panjf2000/ants/v2" "github.com/stretchr/testify/require" ) @@ -38,10 +38,11 @@ type testPlacementBuilder struct { vectors [][]netmap.NodeInfo } -func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( +func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( [][]netmap.NodeInfo, error, ) { - arr := slices.Clone(p.vectors[0]) + arr := make([]netmap.NodeInfo, len(p.vectors[0])) + copy(arr, p.vectors[0]) return [][]netmap.NodeInfo{arr}, nil } @@ -130,13 +131,17 @@ func TestECWriter(t *testing.T) { nodeKey, err := keys.NewPrivateKey() require.NoError(t, err) - log, err := logger.NewLogger(logger.Prm{}) + pool, err := ants.NewPool(4, ants.WithNonblocking(true)) + require.NoError(t, err) + + log, err := logger.NewLogger(nil) require.NoError(t, err) var n nmKeys ecw := ECWriter{ Config: &Config{ NetmapKeys: n, + RemotePool: pool, Logger: log, ClientConstructor: clientConstructor{vectors: ns}, KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil), diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go index d3d2b41b4..0e4c4d9c6 100644 --- a/pkg/services/object/common/writer/writer.go +++ b/pkg/services/object/common/writer/writer.go @@ -12,6 +12,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -23,7 +24,7 @@ type 
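// --- illustrative aside, not part of the patch ---
// The test hunk above builds the remote pool with github.com/panjf2000/ants.
// With WithNonblocking(true), Submit fails fast instead of blocking when all
// workers are busy, which is why every Submit call in the writer paths above
// checks the returned error. Runnable sketch:
package main

import (
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func main() {
	pool, err := ants.NewPool(4, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	var wg sync.WaitGroup
	wg.Add(1)
	if err := pool.Submit(func() {
		defer wg.Done()
		fmt.Println("part written")
	}); err != nil {
		wg.Done()
		// Under load a non-blocking pool returns ants.ErrPoolOverload here.
		fmt.Println("submit failed:", err)
	}
	wg.Wait()
}
// --- end aside ---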
MaxSizeSource interface { // of physically stored object in system. // // Must return 0 if value can not be obtained. - MaxObjectSize(context.Context) uint64 + MaxObjectSize() uint64 } type ClientConstructor interface { @@ -31,7 +32,7 @@ type ClientConstructor interface { } type InnerRing interface { - InnerRingKeys(ctx context.Context) ([][]byte, error) + InnerRingKeys() ([][]byte, error) } type FormatValidatorConfig interface { @@ -51,6 +52,8 @@ type Config struct { NetmapSource netmap.Source + RemotePool, LocalPool util.WorkerPool + NetmapKeys netmap.AnnouncedKeys FormatValidator *object.FormatValidator @@ -66,6 +69,12 @@ type Config struct { type Option func(*Config) +func WithWorkerPools(remote, local util.WorkerPool) Option { + return func(c *Config) { + c.RemotePool, c.LocalPool = remote, local + } +} + func WithLogger(l *logger.Logger) Option { return func(c *Config) { c.Logger = l @@ -78,6 +87,13 @@ func WithVerifySessionTokenIssuer(v bool) Option { } } +func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) { + if c.NetmapKeys.IsLocalKey(pub) { + return c.LocalPool, true + } + return c.RemotePool, false +} + type Params struct { Config *Config diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index a99ba3586..36a17bde2 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -183,7 +182,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) { for i := range members { for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body if members[i].Equals(incoming[j]) { - incoming = slices.Delete(incoming, j, j+1) + incoming = append(incoming[:j], incoming[j+1:]...) j-- } } diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index 1c4d7d585..867d3f4ef 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -92,6 +92,6 @@ func New(gs *getsvc.Service, // WithLogger returns option to specify Delete service's logger. 
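// --- illustrative aside, not part of the patch ---
// WithWorkerPools and getWorkerPool above carry the whole routing policy: one
// pool for writes handled by this node, one for writes relayed to others.
// Reduced sketch of the functional option plus the selection; all types here
// are stand-ins for the ones in the hunk.
package sketch

type workerPool interface{ Submit(func()) error }

type announcedKeys interface{ IsLocalKey(pub []byte) bool }

type config struct {
	remotePool, localPool workerPool
	netmapKeys            announcedKeys
}

type option func(*config)

func withWorkerPools(remote, local workerPool) option {
	return func(c *config) { c.remotePool, c.localPool = remote, local }
}

// getWorkerPool also reports whether the key belongs to the local node, so a
// caller can pick the local or remote write target in the same breath.
func (c *config) getWorkerPool(pub []byte) (workerPool, bool) {
	if c.netmapKeys.IsLocalKey(pub) {
		return c.localPool, true
	}
	return c.remotePool, false
}
// --- end aside ---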
func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "objectSDK.Delete service")) } } diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index e80132489..e164627d2 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque detachedExecutor.execute(ctx) - return detachedExecutor.err + return detachedExecutor.statusError.err } diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index b24c9417b..ff3f90bf2 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -2,7 +2,6 @@ package getsvc import ( "context" - "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -60,24 +59,53 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS if previousID == nil && len(childrenIDs) == 0 { return nil, objectSDK.NewSplitInfoError(a.splitInfo) } - if len(childrenIDs) > 0 { - if a.rng != nil { - err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer) - } else { - err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer) + if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil { + return nil, err } } else { - if a.rng != nil { - err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer) - } else { - err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer) + if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil { + return nil, err } } + return a.parentObject, nil +} + +func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { + var sourceObjectIDs []oid.ID + sourceObjectID, ok := a.splitInfo.Link() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + sourceObjectID, ok = a.splitInfo.LastPart() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + if len(sourceObjectIDs) == 0 { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + for _, sourceObjectID = range sourceObjectIDs { + obj, err := a.getParent(ctx, sourceObjectID, writer) + if err == nil { + return obj, nil + } + } + return nil, objectSDK.NewSplitInfoError(a.splitInfo) +} + +func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { + obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) if err != nil { return nil, err } - return a.parentObject, nil + parent := obj.Parent() + if parent == nil { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + if err := writer.WriteHeader(ctx, parent); err != nil { + return nil, err + } + return obj, nil } func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) { @@ -162,16 +190,26 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD } func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { + if a.rng == nil { + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { + return err + } + return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true) + } + + if err := a.assemblePayloadInReverse(ctx, writer, 
childrenIDs[len(childrenIDs)-1]); err != nil { return err } - return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true) + return writer.WriteChunk(ctx, a.parentObject.Payload()) } func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err + if a.rng == nil { + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { + return err + } } + if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil { return err } @@ -181,9 +219,16 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev return nil } -func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error { +func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error { + withRng := len(partRanges) > 0 && a.rng != nil + for i := range partIDs { - _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer) + var r *objectSDK.Range + if withRng { + r = &partRanges[i] + } + + _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer) if err != nil { return err } @@ -192,13 +237,22 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec } func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, err := a.buildChain(ctx, prevID) + chain, rngs, err := a.buildChain(ctx, prevID) if err != nil { return err } - slices.Reverse(chain) - return a.assemblePayloadByObjectIDs(ctx, writer, chain, false) + reverseRngs := len(rngs) > 0 + + for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { + chain[left], chain[right] = chain[right], chain[left] + + if reverseRngs { + rngs[left], rngs[right] = rngs[right], rngs[left] + } + } + + return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false) } func (a *assembler) isChild(obj *objectSDK.Object) bool { @@ -206,28 +260,63 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool { return parent == nil || equalAddresses(a.addr, object.AddressOf(parent)) } -func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) { +func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { var ( chain []oid.ID + rngs []objectSDK.Range + from = a.rng.GetOffset() + to = from + a.rng.GetLength() hasPrev = true ) // fill the chain end-to-start for hasPrev { - head, err := a.objGetter.HeadObject(ctx, prevID) - if err != nil { - return nil, err - } - if !a.isChild(head) { - return nil, errParentAddressDiffers + // check that only for "range" requests, + // for `GET` it stops via the false `withPrev` + if a.rng != nil && a.currentOffset <= from { + break } - id, _ := head.ID() - chain = append(chain, id) + head, err := a.objGetter.HeadObject(ctx, prevID) + if err != nil { + return nil, nil, err + } + if !a.isChild(head) { + return nil, nil, errParentAddressDiffers + } + + if a.rng != nil { + sz := head.PayloadSize() + + a.currentOffset -= sz + + if a.currentOffset < to { + off := uint64(0) + if from > a.currentOffset { + off = from - a.currentOffset + sz -= from - a.currentOffset + } + + if to < a.currentOffset+off+sz { + sz = to - off - a.currentOffset + } + + index := len(rngs) + rngs = append(rngs, objectSDK.Range{}) + 
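// --- illustrative aside, not part of the patch ---
// assemblePayloadInReverse above swaps slices.Reverse (Go 1.21+) for a manual
// loop so that the part IDs and their ranges are reversed in lock step.
// Equivalent stand-alone sketch; reverseTogether is a hypothetical helper.
package main

import "fmt"

func reverseTogether[A, B any](xs []A, ys []B) {
	swapYs := len(ys) == len(xs) // ranges are only present for range requests
	for l, r := 0, len(xs)-1; l < r; l, r = l+1, r-1 {
		xs[l], xs[r] = xs[r], xs[l]
		if swapYs {
			ys[l], ys[r] = ys[r], ys[l]
		}
	}
}

func main() {
	ids := []string{"part-a", "part-b", "part-c"}
	offs := []int{0, 10, 20}
	reverseTogether(ids, offs)
	fmt.Println(ids, offs) // [part-c part-b part-a] [20 10 0]
}
// --- end aside ---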
rngs[index].SetOffset(off) + rngs[index].SetLength(sz) + + id, _ := head.ID() + chain = append(chain, id) + } + } else { + id, _ := head.ID() + chain = append(chain, id) + } prevID, hasPrev = head.PreviousID() } - return chain, nil + return chain, rngs, nil } diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go deleted file mode 100644 index ff213cb82..000000000 --- a/pkg/services/object/get/assembler_head.go +++ /dev/null @@ -1,45 +0,0 @@ -package getsvc - -import ( - "context" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - var sourceObjectIDs []oid.ID - sourceObjectID, ok := a.splitInfo.Link() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - sourceObjectID, ok = a.splitInfo.LastPart() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - if len(sourceObjectIDs) == 0 { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - for _, sourceObjectID = range sourceObjectIDs { - obj, err := a.getParent(ctx, sourceObjectID, writer) - if err == nil { - return obj, nil - } - } - return nil, objectSDK.NewSplitInfoError(a.splitInfo) -} - -func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) - if err != nil { - return nil, err - } - parent := obj.Parent() - if parent == nil { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - if err := writer.WriteHeader(ctx, parent); err != nil { - return nil, err - } - return obj, nil -} diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go deleted file mode 100644 index 780693c40..000000000 --- a/pkg/services/object/get/assembler_range.go +++ /dev/null @@ -1,87 +0,0 @@ -package getsvc - -import ( - "context" - "slices" - - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { - return err - } - return writer.WriteChunk(ctx, a.parentObject.Payload()) -} - -func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil { - return err - } - if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part - return err - } - return nil -} - -func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error { - for i := range partIDs { - _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer) - if err != nil { - return err - } - } - return nil -} - -func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, rngs, err := a.buildChainRange(ctx, prevID) - if err != nil { - return err - } - - slices.Reverse(chain) - slices.Reverse(rngs) - return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs) -} - -func (a *assembler) buildChainRange(ctx context.Context, prevID 
oid.ID) ([]oid.ID, []objectSDK.Range, error) { - var ( - chain []oid.ID - rngs []objectSDK.Range - from = a.rng.GetOffset() - to = from + a.rng.GetLength() - - hasPrev = true - ) - - // fill the chain end-to-start - for hasPrev && from < a.currentOffset { - head, err := a.objGetter.HeadObject(ctx, prevID) - if err != nil { - return nil, nil, err - } - if !a.isChild(head) { - return nil, nil, errParentAddressDiffers - } - - nextOffset := a.currentOffset - head.PayloadSize() - clampedFrom := max(from, nextOffset) - clampedTo := min(to, a.currentOffset) - if clampedFrom < clampedTo { - index := len(rngs) - rngs = append(rngs, objectSDK.Range{}) - rngs[index].SetOffset(clampedFrom - nextOffset) - rngs[index].SetLength(clampedTo - clampedFrom) - - id, _ := head.ID() - chain = append(chain, id) - } - - a.currentOffset = nextOffset - prevID, hasPrev = head.PreviousID() - } - - return chain, rngs, nil -} diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index e0a7e1da6..a53299480 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -125,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) { objID := a.addr.Object() - trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch) + trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch) if err != nil { return nil, err } diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index dfb31133c..0ee8aed53 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -28,7 +28,16 @@ func (r *request) executeOnContainer(ctx context.Context) { localStatus := r.status - for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 { + for { + if r.processCurrentEpoch(ctx, localStatus) { + break + } + + // check the maximum depth has been reached + if lookupDepth == 0 { + break + } + lookupDepth-- // go to the previous epoch diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 3a50308c2..557e9a028 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { exec.execute(ctx) - return exec.err + return exec.statusError.err } -func (r *request) execute(ctx context.Context) { - r.log.Debug(ctx, logs.ServingRequest) +func (exec *request) execute(ctx context.Context) { + exec.log.Debug(ctx, logs.ServingRequest) // perform local operation - r.executeLocal(ctx) + exec.executeLocal(ctx) - r.analyzeStatus(ctx, true) + exec.analyzeStatus(ctx, true) } -func (r *request) analyzeStatus(ctx context.Context, execCnr bool) { +func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result - switch r.status { + switch exec.status { case statusOK: - r.log.Debug(ctx, logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) case statusINHUMED: - r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) + exec.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) - r.assemble(ctx) + exec.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) + exec.assemble(ctx) case statusOutOfRange: - r.log.Debug(ctx, 
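// --- illustrative aside, not part of the patch ---
// Both chain builders above (the merged buildChain and the deleted
// buildChainRange) reduce to one piece of interval arithmetic: walking parts
// from the tail, intersect the requested byte range [from, to) with each
// part's payload window. Worked sketch under that reading; partRange is a
// hypothetical helper.
package main

import "fmt"

// partRange maps the intersection of [from, to) and a part spanning
// [partStart, partEnd) into the part's own offset space; ok is false when
// the part contributes nothing to the request.
func partRange(from, to, partStart, partEnd uint64) (off, ln uint64, ok bool) {
	clampedFrom := max(from, partStart)
	clampedTo := min(to, partEnd)
	if clampedFrom >= clampedTo {
		return 0, 0, false
	}
	return clampedFrom - partStart, clampedTo - clampedFrom, true
}

func main() {
	// Request bytes [5, 25) of an object whose last part covers [10, 30):
	// the part supplies its own bytes [0, 15).
	fmt.Println(partRange(5, 25, 10, 30)) // 0 15 true
}
// --- end aside ---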
logs.GetRequestedRangeIsOutOfObjectBounds) + exec.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) case statusEC: - r.log.Debug(ctx, logs.GetRequestedObjectIsEC) - if r.isRaw() && execCnr { - r.executeOnContainer(ctx) - r.analyzeStatus(ctx, false) + exec.log.Debug(ctx, logs.GetRequestedObjectIsEC) + if exec.isRaw() && execCnr { + exec.executeOnContainer(ctx) + exec.analyzeStatus(ctx, false) } - r.assembleEC(ctx) + exec.assembleEC(ctx) default: - r.log.Debug(ctx, logs.OperationFinishedWithError, - zap.Error(r.err), + exec.log.Debug(ctx, logs.OperationFinishedWithError, + zap.Error(exec.err), ) var errAccessDenied *apistatus.ObjectAccessDenied - if execCnr && errors.As(r.err, &errAccessDenied) { + if execCnr && errors.As(exec.err, &errAccessDenied) { // Local get can't return access denied error, so this error was returned by // write to the output stream. So there is no need to try to find object on other nodes. return } if execCnr { - r.executeOnContainer(ctx) - r.analyzeStatus(ctx, false) + exec.executeOnContainer(ctx) + exec.analyzeStatus(ctx, false) } } } diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 3efc72065..6827018dc 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -63,7 +63,7 @@ type testClient struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { +func (e testEpochReceiver) Epoch() (uint64, error) { return uint64(e), nil } @@ -79,7 +79,7 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { +func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { opts := make([]placement.Option, 0, 4) opts = append(opts, placement.ForContainer(g.c), @@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid. opts = append(opts, placement.ForObject(*obj)) } - t, err := placement.NewTraverser(context.Background(), opts...) + t, err := placement.NewTraverser(opts...) 
return t, &containerCore.Container{ Value: g.c, }, err } -func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index 83ef54744..599a6f176 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -28,14 +28,14 @@ type containerStorage struct { cnt *container.Container } -func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) { coreCnt := coreContainer.Container{ Value: *cs.cnt, } return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go index 2c64244cf..0df67dec9 100644 --- a/pkg/services/object/get/remote_getter.go +++ b/pkg/services/object/get/remote_getter.go @@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob if err != nil { return nil, err } - epoch, err := g.es.Epoch(ctx) + epoch, err := g.es.Epoch() if err != nil { return nil, err } diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go index 268080486..be0950c60 100644 --- a/pkg/services/object/get/request.go +++ b/pkg/services/object/get/request.go @@ -122,7 +122,7 @@ func (r *request) initEpoch(ctx context.Context) bool { return true } - e, err := r.epochSource.Epoch(ctx) + e, err := r.epochSource.Epoch() switch { default: @@ -141,7 +141,7 @@ func (r *request) initEpoch(ctx context.Context) bool { func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) { obj := addr.Object() - t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch) + t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch) switch { default: diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index a103f5a7f..9ec10b5f2 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -53,6 +53,6 @@ func New( // WithLogger returns option to specify Get service's logger. 
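// --- illustrative aside, not part of the patch ---
// The WithLogger hunks in this diff (Delete above, Get and Get V2 below) all
// tag the injected logger with a fixed "component" field. With plain zap the
// same effect looks like this:
package main

import "go.uber.org/zap"

func main() {
	base := zap.NewExample()
	defer base.Sync()

	// Every record logged through getLog carries component=Object.Get service.
	getLog := base.With(zap.String("component", "Object.Get service"))
	getLog.Info("serving request")
}
// --- end aside ---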
func WithLogger(l *logger.Logger) Option { return func(s *Service) { - s.log = l + s.log = l.With(zap.String("component", "Object.Get service")) } } diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go index 664366d1b..9669afdba 100644 --- a/pkg/services/object/get/types.go +++ b/pkg/services/object/get/types.go @@ -20,11 +20,11 @@ import ( ) type epochSource interface { - Epoch(ctx context.Context) (uint64, error) + Epoch() (uint64, error) } type traverserGenerator interface { - GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } type keyStorage interface { diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go index 308ccd512..7d26a38c3 100644 --- a/pkg/services/object/get/v2/get_range_hash.go +++ b/pkg/services/object/get/v2/get_range_hash.go @@ -22,7 +22,7 @@ import ( // GetRangeHash calls internal service and returns v2 response. func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - forward, err := s.needToForwardGetRangeHashRequest(ctx, req) + forward, err := s.needToForwardGetRangeHashRequest(req) if err != nil { return nil, err } @@ -48,7 +48,7 @@ type getRangeForwardParams struct { address oid.Address } -func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { +func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { if req.GetMetaHeader().GetTTL() <= 1 { return getRangeForwardParams{}, nil } @@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *obj } result.address = addr - cont, err := s.contSource.Get(ctx, addr.Container()) + cont, err := s.contSource.Get(addr.Container()) if err != nil { return result, fmt.Errorf("(%T) could not get container: %w", s, err) } - epoch, err := s.netmapSource.Epoch(ctx) + epoch, err := s.netmapSource.Epoch() if err != nil { return result, fmt.Errorf("(%T) could not get epoch: %w", s, err) } - nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch) + nm, err := s.netmapSource.GetNetMapByEpoch(epoch) if err != nil { return result, fmt.Errorf("(%T) could not get netmap: %w", s, err) } @@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *obj builder := placement.NewNetworkMapBuilder(nm) objectID := addr.Object() - nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy()) + nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy()) if err != nil { return result, fmt.Errorf("(%T) could not build object placement: %w", s, err) } diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index 0ec8912fd..fc483b74b 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "Object.Get V2 service")) } } diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go index 0d73bcd4d..98207336c 100644 --- 
a/pkg/services/object/get/v2/streamer.go +++ b/pkg/services/object/get/v2/streamer.go @@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec p.SetHeader(objV2.GetHeader()) p.SetSignature(objV2.GetSignature()) - return s.Send(newResponse(p)) + return s.GetObjectStream.Send(newResponse(p)) } func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error { p := new(objectV2.GetObjectPartChunk) p.SetChunk(chunk) - return s.Send(newResponse(p)) + return s.GetObjectStream.Send(newResponse(p)) } func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { @@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { } func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error { - return s.Send(newRangeResponse(chunk)) + return s.GetObjectRangeStream.Send(newRangeResponse(chunk)) } func newRangeResponse(p []byte) *objectV2.GetRangeResponse { diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index e699a3779..bfa7fd619 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -3,7 +3,6 @@ package getsvc import ( "context" "crypto/sha256" - "errors" "hash" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -183,7 +182,9 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran default: return nil, errUnknownChechsumType(t) case refs.SHA256: - p.SetHashGenerator(sha256.New) + p.SetHashGenerator(func() hash.Hash { + return sha256.New() + }) case refs.TillichZemor: p.SetHashGenerator(func() hash.Hash { return tz.New() @@ -359,20 +360,19 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { var err error + + defer func() { + stop = err == nil + + if stop || firstErr == nil { + firstErr = err + } + + // would be nice to log otherwise + }() + res, err = f(ctx, addr, c, key) - // non-status logic error that could be returned - // from the SDK client; should not be considered - // as a connection error - var siErr *objectSDK.SplitInfoError - var eiErr *objectSDK.ECInfoError - - stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr) - - if stop || firstErr == nil { - firstErr = err - } - return }) diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go index 6a6ee0f0f..19748e938 100644 --- a/pkg/services/object/metrics.go +++ b/pkg/services/object/metrics.go @@ -4,7 +4,6 @@ import ( "context" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) @@ -35,7 +34,7 @@ type ( } MetricRegister interface { - AddRequestDuration(string, time.Duration, bool, string) + AddRequestDuration(string, time.Duration, bool) AddPayloadSize(string, int) } ) @@ -52,7 +51,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er if m.enabled { t := time.Now() defer func() { - m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) + m.metrics.AddRequestDuration("Get", time.Since(t), err == nil) }() err = m.next.Get(req, &getStreamMetric{ ServerStream: stream, @@ -107,7 +106,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl res, err := m.next.PutSingle(ctx, request) - m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == 
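// --- illustrative aside, not part of the patch ---
// The restored groupAddressRequestForwarder above remembers the first error
// while iterating a node's addresses and stops at the first success, with a
// defer doing the bookkeeping. Simplified stand-alone version (tryAddresses
// is a hypothetical name; the variant removed above additionally stopped on
// split/EC info errors):
package main

import (
	"errors"
	"fmt"
)

func tryAddresses(addrs []string, f func(addr string) error) error {
	var firstErr error
	for _, addr := range addrs {
		var err error
		stop := func() bool {
			defer func() {
				if firstErr == nil {
					firstErr = err // keep only the first failure
				}
			}()
			err = f(addr)
			return err == nil
		}()
		if stop {
			return nil
		}
	}
	return firstErr
}

func main() {
	err := tryAddresses([]string{"10.0.0.1", "10.0.0.2"}, func(addr string) error {
		return errors.New("dial " + addr + ": refused")
	})
	fmt.Println(err) // dial 10.0.0.1: refused
}
// --- end aside ---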
nil, qos.IOTagFromContext(ctx)) + m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil) if err == nil { m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload())) } @@ -123,7 +122,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) res, err := m.next.Head(ctx, request) - m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) + m.metrics.AddRequestDuration("Head", time.Since(t), err == nil) return res, err } @@ -136,7 +135,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream) err := m.next.Search(req, stream) - m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) + m.metrics.AddRequestDuration("Search", time.Since(t), err == nil) return err } @@ -149,7 +148,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque res, err := m.next.Delete(ctx, request) - m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) + m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil) return res, err } return m.next.Delete(ctx, request) @@ -161,7 +160,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR err := m.next.GetRange(req, stream) - m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) + m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil) return err } @@ -174,7 +173,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa res, err := m.next.GetRangeHash(ctx, request) - m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) + m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil) return res, err } @@ -210,7 +209,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { res, err := s.stream.CloseAndRecv(ctx) - s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) + s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil) return res, err } @@ -224,7 +223,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { res, err := s.stream.CloseAndRecv(ctx) - s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) + s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil) return res, err } diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go index 5d298bfed..953f82b48 100644 --- a/pkg/services/object/patch/service.go +++ b/pkg/services/object/patch/service.go @@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config, // Patch calls internal service and returns v2 object streamer. 
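// --- illustrative aside, not part of the patch ---
// Every handler in the metrics.go hunk above follows the same decorator
// shape: time the call, then record duration and success. Generic sketch with
// a hypothetical register interface; the post-revert MetricRegister has the
// same AddRequestDuration(string, time.Duration, bool) signature.
package sketch

import "time"

type metricRegister interface {
	AddRequestDuration(method string, d time.Duration, success bool)
}

// timed wraps a unary handler with duration/success accounting.
func timed[Req, Resp any](m metricRegister, method string, next func(Req) (Resp, error)) func(Req) (Resp, error) {
	return func(req Req) (Resp, error) {
		t := time.Now()
		resp, err := next(req)
		m.AddRequestDuration(method, time.Since(t), err == nil)
		return resp, err
	}
}
// --- end aside ---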
func (s *Service) Patch() (object.PatchObjectStream, error) { - nodeKey, err := s.KeyStorage.GetKey(nil) + nodeKey, err := s.Config.KeyStorage.GetKey(nil) if err != nil { return nil, err } diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index ff13b1d3e..91b4efdc1 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { } oV2.GetHeader().SetOwnerID(ownerID) - target, err := target.New(ctx, objectwriter.Params{ + target, err := target.New(objectwriter.Params{ Config: s.Config, Common: commonPrm, Header: objectSDK.NewFromV2(oV2), @@ -195,12 +195,7 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { patch.FromV2(req.GetBody()) if !s.nonFirstSend { - err := s.patcher.ApplyHeaderPatch(ctx, - patcher.ApplyHeaderPatchPrm{ - NewSplitHeader: patch.NewSplitHeader, - NewAttributes: patch.NewAttributes, - ReplaceAttributes: patch.ReplaceAttributes, - }) + err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes) if err != nil { return fmt.Errorf("patch attributes: %w", err) } @@ -219,9 +214,6 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { } func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { - if s.patcher == nil { - return nil, errors.New("uninitialized patch streamer") - } patcherResp, err := s.patcher.Close(ctx) if err != nil { return nil, err diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 7aeb5857d..5cc0a5722 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) @@ -26,6 +27,8 @@ func NewService(ks *objutil.KeyStorage, opts ...objectwriter.Option, ) *Service { c := &objectwriter.Config{ + RemotePool: util.NewPseudoWorkerPool(), + LocalPool: util.NewPseudoWorkerPool(), Logger: logger.NewLoggerWrapper(zap.L()), KeyStorage: ks, ClientConstructor: cc, @@ -56,8 +59,8 @@ func NewService(ks *objutil.KeyStorage, } } -func (s *Service) Put() (*Streamer, error) { +func (p *Service) Put() (*Streamer, error) { return &Streamer{ - Config: s.Config, + Config: p.Config, }, nil } diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 90f473254..5219e64d5 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -21,6 +21,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" @@ -86,7 +87,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest } func (s *Service) validatePutSingle(ctx context.Context, obj 
*objectSDK.Object) (object.ContentMeta, error) { - if err := s.validarePutSingleSize(ctx, obj); err != nil { + if err := s.validarePutSingleSize(obj); err != nil { return object.ContentMeta{}, err } @@ -97,12 +98,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) return s.validatePutSingleObject(ctx, obj) } -func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error { +func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error { if uint64(len(obj.Payload())) != obj.PayloadSize() { return target.ErrWrongPayloadSize } - maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx) + maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize() if obj.PayloadSize() > maxAllowedSize { return target.ErrExceedingMaxSize } @@ -153,7 +154,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { localOnly := req.GetMetaHeader().GetTTL() <= 1 - placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly) + placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly) if err != nil { return err } @@ -166,13 +167,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o } func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { - iter := s.NewNodeIterator(placement.placementOptions) + iter := s.Config.NewNodeIterator(placement.placementOptions) iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly) iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast signer := &putSingleRequestSigner{ req: req, - keyStorage: s.KeyStorage, + keyStorage: s.Config.KeyStorage, signer: &sync.Once{}, } @@ -186,13 +187,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace if err != nil { return err } - key, err := s.KeyStorage.GetKey(nil) + key, err := s.Config.KeyStorage.GetKey(nil) if err != nil { return err } signer := &putSingleRequestSigner{ req: req, - keyStorage: s.KeyStorage, + keyStorage: s.Config.KeyStorage, signer: &sync.Once{}, } @@ -218,14 +219,14 @@ type putSinglePlacement struct { resetSuccessAfterOnBroadcast bool } -func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { +func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { var result putSinglePlacement cnrID, ok := obj.ContainerID() if !ok { return result, errors.New("missing container ID") } - cnrInfo, err := s.ContainerSource.Get(ctx, cnrID) + cnrInfo, err := s.Config.ContainerSource.Get(cnrID) if err != nil { return result, fmt.Errorf("could not get container by ID: %w", err) } @@ -249,14 +250,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS } result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource) + latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource) if err != nil { return result, fmt.Errorf("could not get latest network map: %w", err) } builder := 
placement.NewNetworkMapBuilder(latestNetmap) if localOnly { result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1)) - builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys) + builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys) } result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder)) return result, nil @@ -273,7 +274,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite client.NodeInfoFromNetmapElement(&info, nodeDesc.Info) - c, err := s.ClientConstructor.Get(info) + c, err := s.Config.ClientConstructor.Get(info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } @@ -283,7 +284,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { localTarget := &objectwriter.LocalTarget{ - Storage: s.LocalStore, + Storage: s.Config.LocalStore, Container: container, } return localTarget.WriteObject(ctx, obj, meta) @@ -317,11 +318,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, if err != nil { objID, _ := obj.ID() cnrID, _ := obj.ContainerID() - s.Logger.Warn(ctx, logs.PutSingleRedirectFailure, + s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure, zap.Error(err), zap.Stringer("address", addr), zap.Stringer("object_id", objID), zap.Stringer("container_id", cnrID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index 19768b7fa..f71309d31 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -36,7 +36,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { } var err error - p.target, err = target.New(ctx, prmTarget) + p.target, err = target.New(prmTarget) if err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) } diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go index f0c648187..36b514fbc 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -56,10 +56,10 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) s.saveChunks = v.GetSignature() != nil if s.saveChunks { - maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx) + maxSz := s.stream.MaxSizeSrc.MaxObjectSize() s.sizes = &sizes{ - payloadSz: v.GetHeader().GetPayloadLength(), + payloadSz: uint64(v.GetHeader().GetPayloadLength()), } // check payload size limit overflow diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go deleted file mode 100644 index 01eb1ea8d..000000000 --- a/pkg/services/object/qos.go +++ /dev/null @@ -1,145 +0,0 @@ -package object - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" -) - -var _ ServiceServer = (*qosObjectService)(nil) - -type AdjustIOTag interface { - AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context -} - -type qosObjectService struct { - next ServiceServer - adj AdjustIOTag -} - -func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer { - return &qosObjectService{ - next: next, - adj: 
adjIOTag, - } -} - -func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Delete(ctx, req) -} - -func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error { - ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Get(req, &qosReadStream[*object.GetResponse]{ - ctxF: func() context.Context { return ctx }, - sender: s, - }) -} - -func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error { - ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{ - ctxF: func() context.Context { return ctx }, - sender: s, - }) -} - -func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.GetRangeHash(ctx, req) -} - -func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Head(ctx, req) -} - -func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) { - s, err := q.next.Patch(ctx) - if err != nil { - return nil, err - } - return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{ - s: s, - adj: q.adj, - }, nil -} - -func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) { - s, err := q.next.Put(ctx) - if err != nil { - return nil, err - } - return &qosWriteStream[*object.PutRequest, *object.PutResponse]{ - s: s, - adj: q.adj, - }, nil -} - -func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.PutSingle(ctx, req) -} - -func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error { - ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) - return q.next.Search(req, &qosReadStream[*object.SearchResponse]{ - ctxF: func() context.Context { return ctx }, - sender: s, - }) -} - -type qosSend[T any] interface { - Send(T) error -} - -type qosReadStream[T any] struct { - sender qosSend[T] - ctxF func() context.Context -} - -func (g *qosReadStream[T]) Context() context.Context { - return g.ctxF() -} - -func (g *qosReadStream[T]) Send(resp T) error { - return g.sender.Send(resp) -} - -type qosVerificationHeader interface { - GetVerificationHeader() *session.RequestVerificationHeader -} - -type qosSendRecv[TReq qosVerificationHeader, TResp any] interface { - Send(context.Context, TReq) error - CloseAndRecv(context.Context) (TResp, error) -} - -type qosWriteStream[TReq qosVerificationHeader, TResp any] struct { - s qosSendRecv[TReq, TResp] - adj AdjustIOTag - - ioTag string - ioTagDefined bool -} - -func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) { - if q.ioTagDefined { - ctx = tagging.ContextWithIOTag(ctx, q.ioTag) - } - return q.s.CloseAndRecv(ctx) -} - -func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error { - 
if !q.ioTagDefined { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) - q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx) - } - assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment") - ctx = tagging.ContextWithIOTag(ctx, q.ioTag) - return q.s.Send(ctx, req) -} diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go new file mode 100644 index 000000000..eb4041f80 --- /dev/null +++ b/pkg/services/object/request_context.go @@ -0,0 +1,24 @@ +package object + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" +) + +type RequestContextKeyT struct{} + +var RequestContextKey = RequestContextKeyT{} + +// RequestContext is a context passed between middleware handlers. +type RequestContext struct { + Namespace string + + SenderKey []byte + + ContainerOwner user.ID + + Role acl.Role + + BearerToken *bearer.Token +} diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index 60d469b11..e82f999cf 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -20,7 +20,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error { ) // initialize epoch number - if err := exec.initEpoch(ctx); err != nil { + if err := exec.initEpoch(); err != nil { return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err) } @@ -48,7 +48,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { zap.Uint64("number", exec.curProcEpoch), ) - traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch) + traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch) if err != nil { return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err) } @@ -114,9 +114,9 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { return nil } -func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) { +func (exec *execCtx) getContainer() (containerSDK.Container, error) { cnrID := exec.containerID() - cnr, err := exec.svc.containerSource.Get(ctx, cnrID) + cnr, err := exec.svc.containerSource.Get(cnrID) if err != nil { return containerSDK.Container{}, err } diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index ced51ecce..eb9635f14 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -1,8 +1,6 @@ package searchsvc import ( - "context" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -50,13 +48,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 { return exec.prm.common.NetmapLookupDepth() } -func (exec *execCtx) initEpoch(ctx context.Context) error { +func (exec *execCtx) initEpoch() error { exec.curProcEpoch = exec.netmapEpoch() if exec.curProcEpoch > 0 { return nil } - e, err := exec.svc.currentEpochReceiver.Epoch(ctx) + e, err := exec.svc.currentEpochReceiver.Epoch() if err != nil { return err } diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 918ad421f..0a40025e1 100644 --- a/pkg/services/object/search/search_test.go +++ 
b/pkg/services/object/search/search_test.go @@ -6,7 +6,6 @@ import ( "crypto/sha256" "errors" "fmt" - "slices" "strconv" "testing" @@ -59,7 +58,7 @@ type simpleIDWriter struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { +func (e testEpochReceiver) Epoch() (uint64, error) { return uint64(e), nil } @@ -82,8 +81,8 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { - t, err := placement.NewTraverser(context.Background(), +func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { + t, err := placement.NewTraverser( placement.ForContainer(g.c), placement.UseBuilder(g.b[epoch]), placement.WithoutSuccessTracking(), @@ -91,7 +90,7 @@ func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID return t, &containerCore.Container{Value: g.c}, err } -func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) @@ -104,7 +103,8 @@ func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, o return nil, errors.New("vectors for address not found") } - res := slices.Clone(vs) + res := make([][]netmap.NodeInfo, len(vs)) + copy(res, vs) return res, nil } diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index 56fe56468..77d25357a 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -46,11 +46,11 @@ type cfg struct { } traverserGenerator interface { - GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } currentEpochReceiver interface { - Epoch(ctx context.Context) (uint64, error) + Epoch() (uint64, error) } keyStore *util.KeyStorage @@ -94,6 +94,6 @@ func New(e *engine.StorageEngine, // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "Object.Search service")) } } diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index 0be5345b9..910384a0b 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -2,7 +2,6 @@ package searchsvc import ( "context" - "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -54,7 +53,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error { } // exclude processed address - list = slices.Delete(list, i, i+1) + list = append(list[:i], list[i+1:]...) 
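
The hunk above (and several later ones) swaps slices.Delete for the pre-generics append idiom; together with the i-- that follows, both forms drop element i in place and shrink the slice by one, so the loop index must step back. A stand-alone comparison on hypothetical data:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        a := []int{10, 20, 30, 40}
        b := slices.Clone(a)

        i := 1
        a = slices.Delete(a, i, i+1)  // newer stdlib helper (Go 1.21+)
        b = append(b[:i], b[i+1:]...) // classic idiom this patch restores

        fmt.Println(a) // [10 30 40]
        fmt.Println(b) // [10 30 40]
    }

Both run in O(n) and reuse the backing array, which is why the swap can be done mechanically across the codebase.
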
i-- } @@ -114,7 +113,7 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c } func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { - cnr, err := exec.getContainer(ctx) + cnr, err := exec.getContainer() if err != nil { return nil, err } diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index fd8e926dd..2b44227a5 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -96,8 +96,7 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - err = fmt.Errorf("could not close stream and receive response: %w", err) - resp = new(object.PutResponse) + return nil, fmt.Errorf("could not close stream and receive response: %w", err) } } @@ -133,8 +132,7 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - err = fmt.Errorf("could not close stream and receive response: %w", err) - resp = new(object.PatchResponse) + return nil, fmt.Errorf("could not close stream and receive response: %w", err) } } diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go index b446d3605..0b3676edb 100644 --- a/pkg/services/object/transport_splitter.go +++ b/pkg/services/object/transport_splitter.go @@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error { var newResp *object.SearchResponse - for { + for ln := uint64(len(ids)); ; { if newResp == nil { newResp = new(object.SearchResponse) newResp.SetBody(body) } - cut := min(s.addrAmount, uint64(len(ids))) + cut := min(s.addrAmount, ln) body.SetIDList(ids[:cut]) newResp.SetMetaHeader(resp.GetMetaHeader()) diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index b10826226..2c1e053ac 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -17,3 +17,11 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net zap.Error(err), ) } + +// LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
+func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) { + l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool, + zap.String("request", req), + zap.Error(err), + ) +} diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go index f74b0aab9..1bd39f9ea 100644 --- a/pkg/services/object/util/placement.go +++ b/pkg/services/object/util/placement.go @@ -1,9 +1,7 @@ package util import ( - "context" "fmt" - "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -45,8 +43,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu } } -func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) +func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -78,8 +76,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac } } -func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) +func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -94,7 +92,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o } if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - vs[i] = slices.Delete(vs[i], j, j+1) + vs[i] = append(vs[i][:j], vs[i][j+1:]...) j-- } } @@ -124,15 +122,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav // GenerateTraverser generates placement Traverser for provided object address // using epoch-th network map. -func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { +func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { // get network map by epoch - nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch) + nm, err := g.netMapSrc.GetNetMapByEpoch(epoch) if err != nil { return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err) } // get container related container - cnr, err := g.cnrSrc.Get(ctx, idCnr) + cnr, err := g.cnrSrc.Get(idCnr) if err != nil { return nil, nil, fmt.Errorf("could not get container: %w", err) } @@ -162,7 +160,7 @@ func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID ) } - t, err := placement.NewTraverser(ctx, traverseOpts...) + t, err := placement.NewTraverser(traverseOpts...) 
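
GenerateTraverser above reduces to a two-fetch pipeline before the traverser is built: pin a network map by epoch, resolve the container, then feed both into the placement options. A minimal sketch of that flow with illustrative stand-in types, not the real frostfs interfaces:

    package main

    import "fmt"

    type netMap struct{}
    type container struct{}

    type netMapSource interface {
        GetNetMapByEpoch(epoch uint64) (*netMap, error)
    }
    type containerSource interface {
        Get(id string) (*container, error)
    }

    func generateTraverser(nms netMapSource, cs containerSource, cnrID string, epoch uint64) error {
        nm, err := nms.GetNetMapByEpoch(epoch) // 1. pin the map for a fixed epoch
        if err != nil {
            return fmt.Errorf("could not get network map #%d: %w", epoch, err)
        }
        cnr, err := cs.Get(cnrID) // 2. resolve the container
        if err != nil {
            return fmt.Errorf("could not get container: %w", err)
        }
        _, _ = nm, cnr // 3. the real code builds placement options and calls NewTraverser here
        return nil
    }

    type stubNM struct{}

    func (stubNM) GetNetMapByEpoch(uint64) (*netMap, error) { return &netMap{}, nil }

    type stubCS struct{}

    func (stubCS) Get(string) (*container, error) { return &container{}, nil }

    func main() {
        fmt.Println(generateTraverser(stubNM{}, stubCS{}, "cnr", 42))
    }
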
if err != nil { return nil, nil, err } diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go index 0f24a9d96..45e6df339 100644 --- a/pkg/services/object_manager/placement/metrics.go +++ b/pkg/services/object_manager/placement/metrics.go @@ -2,90 +2,24 @@ package placement import ( "errors" - "fmt" - "maps" - "math" "strings" - "sync" - "sync/atomic" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" - locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) const ( attrPrefix = "$attribute:" - - geoDistance = "$geoDistance" ) type Metric interface { CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int } -type metricsParser struct { - locodeDBPath string - locodes map[string]locodedb.Point -} - -type MetricParser interface { - ParseMetrics([]string) ([]Metric, error) -} - -func NewMetricsParser(locodeDBPath string) (MetricParser, error) { - return &metricsParser{ - locodeDBPath: locodeDBPath, - }, nil -} - -func (p *metricsParser) initLocodes() error { - if len(p.locodes) != 0 { - return nil +func ParseMetric(raw string) (Metric, error) { + if attr, found := strings.CutPrefix(raw, attrPrefix); found { + return NewAttributeMetric(attr), nil } - if len(p.locodeDBPath) > 0 { - p.locodes = make(map[string]locodedb.Point) - locodeDB := locodebolt.New(locodebolt.Prm{ - Path: p.locodeDBPath, - }, - locodebolt.ReadOnly(), - ) - err := locodeDB.Open() - if err != nil { - return err - } - defer locodeDB.Close() - err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) { - p.locodes[k] = v - }) - if err != nil { - return err - } - return nil - } - return errors.New("set path to locode database") -} - -func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) { - var metrics []Metric - for _, raw := range priority { - if attr, found := strings.CutPrefix(raw, attrPrefix); found { - metrics = append(metrics, NewAttributeMetric(attr)) - } else if raw == geoDistance { - err := p.initLocodes() - if err != nil { - return nil, err - } - if len(p.locodes) == 0 { - return nil, fmt.Errorf("provide locodes database for metric %s", raw) - } - m := NewGeoDistanceMetric(p.locodes) - metrics = append(metrics, m) - } else { - return nil, fmt.Errorf("unsupported priority metric %s", raw) - } - } - return metrics, nil + return nil, errors.New("unsupported priority metric") } // attributeMetric describes priority metric based on attribute. @@ -107,79 +41,3 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node func NewAttributeMetric(attr string) Metric { return &attributeMetric{attribute: attr} } - -// geoDistanceMetric describes priority metric based on attribute. -type geoDistanceMetric struct { - locodes map[string]locodedb.Point - distance *atomic.Pointer[map[string]int] - mtx sync.Mutex -} - -func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric { - d := atomic.Pointer[map[string]int]{} - m := make(map[string]int) - d.Store(&m) - gm := &geoDistanceMetric{ - locodes: locodes, - distance: &d, - } - return gm -} - -// CalculateValue return distance in kilometers between current node and provided, -// if coordinates for provided node found. In other case return math.MaxInt. 
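
For reference, the $geoDistance metric removed in this hunk computed great-circle distance with the spherical law of cosines (the geodatasource.com variant named in the deleted comment): degrees to radians, acos of the dot product, then arc degrees to minutes (x60), minutes to statute miles (x1.1515), miles to kilometers (x1.609344). A runnable reconstruction of just the formula:

    package main

    import (
        "fmt"
        "math"
    )

    func distanceKM(lat1, lon1, lat2, lon2 float64) float64 {
        r1 := math.Pi * lat1 / 180
        r2 := math.Pi * lat2 / 180
        theta := math.Pi * (lon1 - lon2) / 180
        d := math.Sin(r1)*math.Sin(r2) + math.Cos(r1)*math.Cos(r2)*math.Cos(theta)
        d = math.Acos(math.Min(d, 1)) // clamp rounding overshoot, as the original did
        return d * 180 / math.Pi * 60 * 1.1515 * 1.609344
    }

    func main() {
        // Moscow (55.75N, 37.62E) to St. Petersburg (59.93N, 30.36E): ~630 km.
        fmt.Printf("%.0f km\n", distanceKM(55.75, 37.62, 59.93, 30.36))
    }

The deleted code additionally memoized results per LOCODE pair behind an atomic pointer to a cloned map, visible just below; the formula is the part worth keeping in mind.
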
-func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { - fl := from.LOCODE() - tl := to.LOCODE() - if fl == tl { - return 0 - } - m := gm.distance.Load() - if v, ok := (*m)[fl+tl]; ok { - return v - } - return gm.calculateDistance(fl, tl) -} - -func (gm *geoDistanceMetric) calculateDistance(from, to string) int { - gm.mtx.Lock() - defer gm.mtx.Unlock() - od := gm.distance.Load() - if v, ok := (*od)[from+to]; ok { - return v - } - nd := maps.Clone(*od) - var dist int - pointFrom, okFrom := gm.locodes[from] - pointTo, okTo := gm.locodes[to] - if okFrom && okTo { - dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude())) - } else { - dist = math.MaxInt - } - nd[from+to] = dist - gm.distance.Store(&nd) - - return dist -} - -// distance return amount of KM between two points. -// Parameters are latitude and longitude of point 1 and 2 in decimal degrees. -// Original implementation can be found here https://www.geodatasource.com/developers/go. -func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 { - radLat1 := math.Pi * lt1 / 180 - radLat2 := math.Pi * lt2 / 180 - radTheta := math.Pi * (ln1 - ln2) / 180 - - dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta) - - if dist > 1 { - dist = 1 - } - - dist = math.Acos(dist) - dist = dist * 180 / math.Pi - dist = dist * 60 * 1.1515 * 1.609344 - - return dist -} diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go index b3f8d9c03..1782e27ea 100644 --- a/pkg/services/object_manager/placement/netmap.go +++ b/pkg/services/object_manager/placement/netmap.go @@ -1,7 +1,6 @@ package placement import ( - "context" "crypto/sha256" "fmt" @@ -36,12 +35,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder { } } -func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) { +func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) { return s.nm, nil } -func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc) +func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + nm, err := netmap.GetLatestNetworkMap(b.nmSrc) if err != nil { return nil, fmt.Errorf("could not get network map: %w", err) } diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index a3f9af959..6a949e938 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -1,7 +1,6 @@ package placement import ( - "context" "errors" "fmt" "slices" @@ -22,7 +21,7 @@ type Builder interface { // // Must return all container nodes if object identifier // is nil. - BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) + BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) } type NodeState interface { @@ -79,7 +78,7 @@ func defaultCfg() *cfg { } // NewTraverser creates, initializes with options and returns Traverser instance. 
-func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { +func NewTraverser(opts ...Option) (*Traverser, error) { cfg := defaultCfg() for i := range opts { @@ -99,7 +98,7 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy) } - ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy) + ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy) if err != nil { return nil, fmt.Errorf("could not build placement: %w", err) } @@ -121,7 +120,10 @@ func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { } rem = []int{-1, -1} - sortedVector := sortVector(cfg, unsortedVector) + sortedVector, err := sortVector(cfg, unsortedVector) + if err != nil { + return nil, err + } ns = [][]netmap.NodeInfo{sortedVector, regularVector} } else if cfg.flatSuccess != nil { ns = flatNodes(ns) @@ -186,7 +188,7 @@ type nodeMetrics struct { metrics []int } -func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo { +func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, error) { nm := make([]nodeMetrics, len(unsortedVector)) node := cfg.nodeState.LocalNodeInfo() @@ -207,7 +209,7 @@ func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo { for i := range unsortedVector { sortedVector[i] = unsortedVector[nm[i].index] } - return sortedVector + return sortedVector, nil } // Node is a descriptor of storage node with information required for intra-container communication. @@ -288,8 +290,8 @@ func (t *Traverser) Next() []Node { func (t *Traverser) skipEmptyVectors() { for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 { - t.vectors = slices.Delete(t.vectors, i, i+1) - t.rem = slices.Delete(t.rem, i, i+1) + t.vectors = append(t.vectors[:i], t.vectors[i+1:]...) + t.rem = append(t.rem[:i], t.rem[i+1:]...) 
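
sortVector, which regains its error return in this hunk (the restored body still always returns a nil error), orders candidate nodes by a per-node vector of metric values computed against the local node. An illustrative reduction; the real code sorts netmap.NodeInfo by Metric.CalculateValue results, plain ints stand in here:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        type nodeMetrics struct {
            index   int   // position in the unsorted vector
            metrics []int // one value per configured priority metric
        }
        nm := []nodeMetrics{
            {index: 0, metrics: []int{1, 7}},
            {index: 1, metrics: []int{0, 9}},
            {index: 2, metrics: []int{0, 3}},
        }
        // Stable sort by comparison of the metric vectors.
        slices.SortStableFunc(nm, func(a, b nodeMetrics) int {
            return slices.Compare(a.metrics, b.metrics)
        })
        for _, m := range nm {
            fmt.Println(m.index, m.metrics) // 2 [0 3], then 1 [0 9], then 0 [1 7]
        }
    }
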
i-- } else { break diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index d1370f21e..f96e5c8a7 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -1,8 +1,6 @@ package placement import ( - "context" - "slices" "strconv" "testing" @@ -19,7 +17,7 @@ type testBuilder struct { vectors [][]netmap.NodeInfo } -func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return b.vectors, nil } @@ -35,7 +33,8 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { vc := make([][]netmap.NodeInfo, 0, len(v)) for i := range v { - ns := slices.Clone(v[i]) + ns := make([]netmap.NodeInfo, len(v[i])) + copy(ns, v[i]) vc = append(vc, ns) } @@ -103,7 +102,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithoutSuccessTracking(), @@ -132,7 +131,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -161,7 +160,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), ) @@ -202,7 +201,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodes, cnr := testPlacement(selectors, replicas) - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local) @@ -277,7 +276,7 @@ func TestTraverserRemValues(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithCopyNumbers(testCase.copyNumbers), @@ -323,7 +322,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -375,7 +374,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -446,7 +445,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { NewAttributeMetric("UN-LOCODE"), } - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -484,7 +483,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { nodesCopy = copyVectors(nodes) - tr, err = NewTraverser(context.Background(), + tr, err = NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -517,7 +516,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { nodesCopy = copyVectors(nodes) - tr, err = NewTraverser(context.Background(), + tr, err = NewTraverser( ForContainer(cnr), 
UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -568,7 +567,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser(context.Background(), + tr, err := NewTraverser( ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -601,53 +600,4 @@ func TestTraverserPriorityMetrics(t *testing.T) { next = tr.Next() require.Nil(t, next) }) - - t.Run("one rep one geo metric", func(t *testing.T) { - t.Skip() - selectors := []int{2} - replicas := []int{2} - - nodes, cnr := testPlacement(selectors, replicas) - - // Node_0, PK - ip4/0.0.0.0/tcp/0 - nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW") - // Node_1, PK - ip4/0.0.0.0/tcp/1 - nodes[0][1].SetAttribute("UN-LOCODE", "RU LED") - - sdkNode := testNode(2) - sdkNode.SetAttribute("UN-LOCODE", "FI HEL") - - nodesCopy := copyVectors(nodes) - - parser, err := NewMetricsParser("/path/to/locode_db") - require.NoError(t, err) - m, err := parser.ParseMetrics([]string{geoDistance}) - require.NoError(t, err) - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - WithoutSuccessTracking(), - WithPriorityMetrics(m), - WithNodeState(&nodeState{ - node: &sdkNode, - }), - ) - require.NoError(t, err) - - // Without priority metric `$geoDistance` the order will be: - // [ {Node_0 RU MOW}, {Node_1 RU LED}] - // With priority metric `$geoDistance` the order should be: - // [ {Node_1 RU LED}, {Node_0 RU MOW}] - next := tr.Next() - require.NotNil(t, next) - require.Equal(t, 2, len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) - - next = tr.Next() - require.Nil(t, next) - }) } diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index e5f001d5a..a4e36c2dc 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -61,8 +61,10 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) - } else if ts != nil { - return g.handleTS(ctx, addrStr, ts, epoch) + } else { + if ts != nil { + return g.handleTS(ctx, addrStr, ts, epoch) + } } // requested tombstone not diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go index 2147a32fe..67ddf316f 100644 --- a/pkg/services/object_manager/tombstone/constructor.go +++ b/pkg/services/object_manager/tombstone/constructor.go @@ -3,7 +3,6 @@ package tombstone import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -50,7 +49,9 @@ func NewChecker(oo ...Option) *ExpirationChecker { panicOnNil(cfg.tsSource, "Tombstone source") cache, err := lru.New[string, uint64](cfg.cacheSize) - assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize)) + if err != nil { + panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err)) + } return &ExpirationChecker{ cache: cache, diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go index 975941847..1ff07b05a 100644 --- a/pkg/services/object_manager/tombstone/source/source.go +++ 
b/pkg/services/object_manager/tombstone/source/source.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -39,7 +38,9 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) { // Panics if any of the provided options does not allow // constructing a valid tombstone local Source. func NewSource(p TombstoneSourcePrm) Source { - assert.False(p.s == nil, "Tombstone source: nil object service") + if p.s == nil { + panic("Tombstone source: nil object service") + } return Source(p) } diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index dcaaec0b4..bdfc4344b 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -28,10 +28,10 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er )) defer span.End() - cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container()) + cnr, err := p.cnrSrc.Get(objInfo.Address.Container()) if err != nil { if client.IsErrContainerNotFound(err) { - existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container()) + existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container()) if errWasRemoved != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved) } else if existed { @@ -56,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { idObj := objInfo.Address.Object() idCnr := objInfo.Address.Container() - nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy) + nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -110,7 +110,6 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // Number of copies that are stored on maintenance nodes. var uncheckedCopies int - var candidates []netmap.NodeInfo for i := 0; shortage > 0 && i < len(nodes); i++ { select { case <-ctx.Done(): @@ -118,68 +117,71 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe default: } - var err error - st := checkedNodes.processStatus(nodes[i]) - if !st.Processed() { - st, err = p.checkStatus(ctx, addr, nodes[i]) - checkedNodes.set(nodes[i], st) - if st == nodeDoesNotHoldObject { - // 1. This is the first time the node is encountered (`!st.Processed()`). - // 2. The node does not hold object (`st == nodeDoesNotHoldObject`). - // So we need to try to put an object to it. - candidates = append(candidates, nodes[i]) - continue - } - } - - switch st { - case nodeIsLocal: + if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) { requirements.needLocalCopy = true shortage-- - case nodeIsUnderMaintenance: - shortage-- - uncheckedCopies++ + } else if nodes[i].Status().IsMaintenance() { + shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) + } else { + if status := checkedNodes.processStatus(nodes[i]); status.Processed() { + if status == nodeHoldsObject { + // node already contains replica, no need to replicate + nodes = append(nodes[:i], nodes[i+1:]...) 
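
The restored processRepNodes body, continued just below, inlines the status check: local key and maintenance are decided from the netmap entry, everything else costs a HEAD call bounded by headTimeout, and the resulting error classifies the node. A compressed sketch of that decision, with stub error values and a stub head function rather than the frostfs client API:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    var (
        errNotFound    = errors.New("object not found")
        errMaintenance = errors.New("node under maintenance")
    )

    func probe(ctx context.Context, head func(context.Context) error, timeout time.Duration) string {
        callCtx, cancel := context.WithTimeout(ctx, timeout)
        defer cancel()
        switch err := head(callCtx); {
        case err == nil:
            return "holds object" // reduces the replica shortage
        case errors.Is(err, errNotFound):
            return "replica candidate" // may receive a copy later
        case errors.Is(err, errMaintenance):
            return "maintenance" // counted as OK, copy left unchecked
        default:
            return "status unknown" // logged, node skipped this round
        }
    }

    func main() {
        head := func(context.Context) error { return errNotFound }
        fmt.Println(probe(context.Background(), head, 5*time.Second))
    }
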
+ i-- + shortage-- + } - p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, - zap.String("node", netmap.StringifyPublicKey(nodes[i]))) - case nodeHoldsObject: - shortage-- - case nodeDoesNotHoldObject: - case nodeStatusUnknown: - p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, - zap.Stringer("object", addr), - zap.Error(err)) - default: - panic("unreachable") + continue + } + + callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) + + _, err := p.remoteHeader(callCtx, nodes[i], addr, false) + + cancel() + + if err == nil { + shortage-- + checkedNodes.submitReplicaHolder(nodes[i]) + } else { + if client.IsErrObjectNotFound(err) { + checkedNodes.submitReplicaCandidate(nodes[i]) + continue + } else if client.IsErrNodeUnderMaintenance(err) { + shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) + } else { + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, + zap.Stringer("object", addr), + zap.Error(err), + ) + } + } } + + nodes = append(nodes[:i], nodes[i+1:]...) + i-- } - p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies) + p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies) } -func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) { - if p.netmapKeys.IsLocalKey(node.PublicKey()) { - return nodeIsLocal, nil - } - if node.Status().IsMaintenance() { - return nodeIsUnderMaintenance, nil - } +// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values +// +// consider remote nodes under maintenance as problem OK. Such +// nodes MAY not respond with object, however, this is how we +// prevent spam with new replicas. +// However, additional copies should not be removed in this case, +// because we can remove the only copy this way. 
+func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { + checkedNodes.submitReplicaHolder(node) + shortage-- + uncheckedCopies++ - callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) - _, err := p.remoteHeader(callCtx, node, addr, false) - cancel() - - if err == nil { - return nodeHoldsObject, nil - } - if client.IsErrObjectNotFound(err) { - return nodeDoesNotHoldObject, nil - } - if client.IsErrNodeUnderMaintenance(err) { - return nodeIsUnderMaintenance, nil - } - return nodeStatusUnknown, err + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, + zap.String("node", netmap.StringifyPublicKey(node)), + ) + return shortage, uncheckedCopies } func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements, diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go index 69879c439..d4c7ccbf9 100644 --- a/pkg/services/policer/check_test.go +++ b/pkg/services/policer/check_test.go @@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) { cache.SubmitSuccessfulReplication(node) require.Equal(t, cache.processStatus(node), nodeHoldsObject) - cache.set(node, nodeDoesNotHoldObject) + cache.submitReplicaCandidate(node) require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject) - cache.set(node, nodeHoldsObject) + cache.submitReplicaHolder(node) require.Equal(t, cache.processStatus(node), nodeHoldsObject) } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index fbdeb3148..f6d3b9ea1 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -39,7 +39,7 @@ func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectco // All of them must be stored on all of the container nodes. 
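
processECChunk below relies on a deterministic assignment rule visible in its first lines: the chunk with EC index i belongs on container node i mod n, so every node can recompute the expected holder without coordination. A minimal illustration:

    package main

    import "fmt"

    func main() {
        nodes := []string{"n0", "n1", "n2", "n3"}
        for chunk := 0; chunk < 6; chunk++ {
            fmt.Printf("EC chunk %d -> required node %s\n", chunk, nodes[chunk%len(nodes)])
        }
    }
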
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { objID := objInfo.Address.Object() - nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy) + nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -69,7 +69,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec } func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { - nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) + nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult { var removeLocalChunk bool requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))] - if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { // current node is required node, we are happy return ecChunkProcessResult{ validPlacement: true, @@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec if uint32(i) == objInfo.ECInfo.Total { break } - if p.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{} } } @@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool { var eiErr *objectSDK.ECInfoError for _, n := range nodes { - if p.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { continue } _, err := p.remoteHeader(ctx, n, parentAddress, true) @@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info return } var err error - if p.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { _, err = p.localHeader(ctx, parentAddress) } else { _, err = p.remoteHeader(ctx, n, parentAddress, true) @@ -283,7 +283,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info } } else if client.IsErrObjectAlreadyRemoved(err) { restore = false - } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { + } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ NumCopies: 1, @@ -343,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, pID, _ := part.ID() addr.SetObject(pID) targetNode := nodes[idx%len(nodes)] - if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) { + if 
p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) { p.replicator.HandleLocalPutTask(ctx, replicator.Task{ Addr: addr, Obj: part, @@ -371,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I var obj *objectSDK.Object var err error for _, node := range nodes { - if p.netmapKeys.IsLocalKey(node.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { obj, err = p.localObject(egCtx, objID) } else { obj, err = p.remoteObject(egCtx, node, objID) diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go index c6980536b..e230153f9 100644 --- a/pkg/services/policer/ec_test.go +++ b/pkg/services/policer/ec_test.go @@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } @@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index c2157de5d..cd47cb0fc 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -8,9 +8,6 @@ const ( nodeNotProcessed nodeProcessStatus = iota nodeDoesNotHoldObject nodeHoldsObject - nodeStatusUnknown - nodeIsUnderMaintenance - nodeIsLocal ) func (st nodeProcessStatus) Processed() bool { @@ -18,19 +15,37 @@ func (st nodeProcessStatus) Processed() bool { } // nodeCache tracks Policer's check progress. -type nodeCache map[uint64]nodeProcessStatus +type nodeCache map[uint64]bool func newNodeCache() nodeCache { - return make(map[uint64]nodeProcessStatus) + return make(map[uint64]bool) } -func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) { +func (n nodeCache) set(node netmap.NodeInfo, val bool) { n[node.Hash()] = val } +// submits storage node as a candidate to store the object replica in case of +// shortage. +func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) { + n.set(node, false) +} + +// submits storage node as a current object replica holder. +func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) { + n.set(node, true) +} + // processStatus returns current processing status of the storage node. 
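
The nodeCache restored in this hunk packs three states into a map[uint64]bool: a missing key means the node was never processed, false marks a replica candidate, true marks a replica holder. processStatus, which follows, decodes that; the same decoding as a self-contained sketch, keyed by a plain uint64 instead of NodeInfo.Hash():

    package main

    import "fmt"

    type cache map[uint64]bool

    func (c cache) status(h uint64) string {
        switch v, ok := c[h]; {
        case !ok:
            return "not processed"
        case v:
            return "holds object"
        default:
            return "replica candidate"
        }
    }

    func main() {
        c := cache{1: true, 2: false}
        fmt.Println(c.status(1), "/", c.status(2), "/", c.status(3))
    }
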
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { - return n[node.Hash()] + switch val, ok := n[node.Hash()]; { + case !ok: + return nodeNotProcessed + case val: + return nodeHoldsObject + default: + return nodeDoesNotHoldObject + } } // SubmitSuccessfulReplication marks given storage node as a current object @@ -38,5 +53,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { // // SubmitSuccessfulReplication implements replicator.TaskResult. func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) { - n.set(node, nodeHoldsObject) + n.submitReplicaHolder(node) } diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go index c91e7cc7c..4e8bacfec 100644 --- a/pkg/services/policer/policer.go +++ b/pkg/services/policer/policer.go @@ -1,13 +1,12 @@ package policer import ( - "fmt" "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" + "go.uber.org/zap" ) type objectsInWork struct { @@ -55,8 +54,12 @@ func New(opts ...Option) *Policer { opts[i](c) } + c.log = c.log.With(zap.String("component", "Object Policer")) + cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) - assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize)) + if err != nil { + panic(err) + } return &Policer{ cfg: c, diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index 049c33753..4e17e98a8 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "slices" "sort" "testing" "time" @@ -37,10 +36,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) { // Container source and bury function buryCh := make(chan oid.Address) containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -79,7 +78,6 @@ func TestProcessObject(t *testing.T) { maintenanceNodes []int wantRemoveRedundant bool wantReplicateTo []int - headResult map[int]error ecInfo *objectcore.ECInfo }{ { @@ -129,7 +127,7 @@ func TestProcessObject(t *testing.T) { nodeCount: 2, policy: `REP 2 REP 2`, placement: [][]int{{0, 1}, {0, 1}}, - wantReplicateTo: []int{1}, + wantReplicateTo: []int{1, 1}, // is this actually good? 
}, { desc: "lock object must be replicated to all nodes", @@ -147,14 +145,6 @@ func TestProcessObject(t *testing.T) { objHolders: []int{1}, maintenanceNodes: []int{2}, }, - { - desc: "preserve local copy when node response with MAINTENANCE", - nodeCount: 3, - policy: `REP 2`, - placement: [][]int{{1, 2}}, - objHolders: []int{1}, - headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)}, - }, { desc: "lock object must be replicated to all EC nodes", objType: objectSDK.TypeLock, @@ -171,14 +161,6 @@ func TestProcessObject(t *testing.T) { placement: [][]int{{0, 1, 2}}, wantReplicateTo: []int{1, 2}, }, - { - desc: "do not remove local copy when MAINTENANCE status is cached", - objType: objectSDK.TypeRegular, - nodeCount: 3, - policy: `REP 1 REP 1`, - placement: [][]int{{1, 2}, {1, 0}}, - headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)}, - }, } for i := range tests { @@ -222,14 +204,11 @@ func TestProcessObject(t *testing.T) { t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a) return nil, errors.New("unexpected object head") } - if ti.headResult != nil { - if err, ok := ti.headResult[index]; ok { - return nil, err + for _, i := range ti.objHolders { + if index == i { + return nil, nil } } - if slices.Contains(ti.objHolders, index) { - return nil, nil - } return nil, new(apistatus.ObjectNotFound) } @@ -238,14 +217,14 @@ func TestProcessObject(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { if id.Equals(addr.Container()) { return cnr, nil } t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container()) return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -303,10 +282,10 @@ func TestProcessObjectError(t *testing.T) { cnr := &container.Container{} cnr.Value.Init() source := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(id cid.ID) (*container.DelInfo, error) { return nil, new(apistatus.ContainerNotFound) }, } @@ -351,10 +330,10 @@ func TestIteratorContract(t *testing.T) { } containerSrc := containerSrc{ - get: func(ctx context.Context, id cid.ID) (*container.Container, error) { + get: func(id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -443,22 +422,18 @@ func (it *sliceKeySpaceIterator) Rewind() { } type containerSrc struct { - get func(ctx context.Context, id cid.ID) (*container.Container, error) - deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error) + get func(id cid.ID) (*container.Container, error) + deletionInfo func(id cid.ID) (*container.DelInfo, error) } -func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) { - return f.get(ctx, id) -} +func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) } -func (f 
containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) { - return f.deletionInfo(ctx, id) -} +func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) } // placementBuilderFunc is a placement.Builder backed by a function type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) -func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return f(c, o, p) } diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go index 635a5683b..bd830d04e 100644 --- a/pkg/services/policer/process.go +++ b/pkg/services/policer/process.go @@ -7,9 +7,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ -20,7 +18,6 @@ func (p *Policer) Run(ctx context.Context) { } func (p *Policer) shardPolicyWorker(ctx context.Context) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String()) for { select { case <-ctx.Done(): diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go index 8c6f0df06..69395bb02 100644 --- a/pkg/services/replicator/process.go +++ b/pkg/services/replicator/process.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" @@ -44,7 +45,8 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T if err != nil { p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return } @@ -63,6 +65,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T log := p.log.With( zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])), zap.Stringer("object", task.Addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) callCtx, cancel := context.WithTimeout(ctx, p.putTimeout) diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index 216fe4919..5ce929342 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -3,12 +3,12 @@ package replicator import ( "context" "errors" - "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" 
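
The pull.go hunk below trades slices.Collect over an iterator for the older callback walker, consistent with the older toolchain this revert targets: IterateNetworkEndpoints invokes the visitor once per endpoint and stops as soon as it returns true. A stand-in sketch of the callback side (the real method lives on netmap.NodeInfo):

    package main

    import "fmt"

    func iterateNetworkEndpoints(visit func(string) bool) {
        for _, e := range []string{"addr0", "addr1", "addr2"} {
            if visit(e) {
                return // visitor asked to stop early
            }
        }
    }

    func main() {
        var endpoints []string
        iterateNetworkEndpoints(func(s string) bool {
            endpoints = append(endpoints, s)
            return false // false keeps the iteration going
        })
        fmt.Println(endpoints) // [addr0 addr1 addr2]
    }
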
"go.opentelemetry.io/otel/attribute" @@ -43,17 +43,23 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { if err == nil { break } - endpoints := slices.Collect(node.NetworkEndpoints()) + var endpoints []string + node.IterateNetworkEndpoints(func(s string) bool { + endpoints = append(endpoints, s) + return false + }) p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(err), - zap.Strings("endpoints", endpoints)) + zap.Strings("endpoints", endpoints), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } if obj == nil { p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), - zap.Error(errFailedToGetObjectFromAnyNode)) + zap.Error(errFailedToGetObjectFromAnyNode), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return } @@ -61,6 +67,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { if err != nil { p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go index bcad8471d..489f66ae5 100644 --- a/pkg/services/replicator/put.go +++ b/pkg/services/replicator/put.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -32,7 +33,8 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { if task.Obj == nil { p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(errObjectNotDefined)) + zap.Error(errObjectNotDefined), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return } @@ -40,6 +42,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { if err != nil { p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err)) + zap.Error(err), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go index a940cef37..6910fa5af 100644 --- a/pkg/services/replicator/replicator.go +++ b/pkg/services/replicator/replicator.go @@ -7,6 +7,7 @@ import ( objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "go.uber.org/zap" ) // Replicator represents the utility that replicates @@ -44,6 +45,8 @@ func New(opts ...Option) *Replicator { opts[i](c) } + c.log = c.log.With(zap.String("component", "Object Replicator")) + return &Replicator{ cfg: c, } diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index f0591de71..12b221613 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -33,7 +33,10 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { 
- s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create")) + s.log.Debug(ctx, logs.ServingRequest, + zap.String("component", "SessionService"), + zap.String("request", "Create"), + ) respBody, err := s.exec.Create(ctx, req.GetBody()) if err != nil { diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index 132d62445..d312ea0ea 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) { // enable encryption if it // was configured so if cfg.privateKey != nil { - rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8) + rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8) cfg.privateKey.D.FillBytes(rawKey) c, err := aes.NewCipher(rawKey) diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 58757ff6d..606044f8e 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -22,7 +22,7 @@ import ( ) func (s *Service) newAPERequest(ctx context.Context, namespace string, - cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) (aperequest.Request, error) { schemaMethod, err := converter.SchemaMethodFromACLOperation(operation) if err != nil { @@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()), nativeschema.PropertyKeyActorRole: schemaRole, } - reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey) + reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey) if err != nil { return aperequest.Request{}, err } @@ -53,19 +53,15 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) } - resProps := map[string]string{ - nativeschema.ProperyKeyTreeID: treeID, - } - return aperequest.NewRequest( schemaMethod, - aperequest.NewResource(resourceName, resProps), + aperequest.NewResource(resourceName, make(map[string]string)), reqProps, ), nil } func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, - container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) error { namespace := "" cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns") @@ -73,12 +69,12 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, namespace = cntNamespace } - request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey) + request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey) if err != nil { return fmt.Errorf("failed to create ape request: %w", err) } - return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{ + return s.apeChecker.CheckAPE(checkercore.CheckPrm{ Request: request, Namespace: namespace, Container: cid, @@ -89,11 +85,11 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. 
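Editor's note on the TokenStore hunk above: `cfg.privateKey.Params()` and `cfg.privateKey.Curve.Params()` are equivalent, because `elliptic.Curve` is an embedded field of `ecdsa.PublicKey`; the point of the expression is to size the AES key from the curve order so the key width stays fixed even when `D` happens to have leading zero bytes. A self-contained stdlib illustration:

```go
package main

import (
	"crypto/aes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Size the buffer from the curve order N: (256+7)/8 = 32 bytes for P-256,
	// regardless of how many leading zero bytes D has.
	rawKey := make([]byte, (priv.Params().N.BitLen()+7)/8)
	priv.D.FillBytes(rawKey) // left-pads with zeros to the full width

	block, err := aes.NewCipher(rawKey) // a 32-byte key selects AES-256
	if err != nil {
		panic(err)
	}
	fmt.Println("key bytes:", len(rawKey), "cipher block size:", block.BlockSize())
}
```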
-func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { +func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey) + props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey) if err != nil { return reqProps, err } diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go index 7b209fd47..3f94925b5 100644 --- a/pkg/services/tree/ape_test.go +++ b/pkg/services/tree/ape_test.go @@ -37,7 +37,7 @@ type frostfsIDProviderMock struct { subjectsExtended map[util.Uint160]*client.SubjectExtended } -func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -45,7 +45,7 @@ func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160 return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -107,45 +107,6 @@ func TestCheckAPE(t *testing.T) { cid := cid.ID{} _ = cid.DecodeString(containerID) - t.Run("treeID rule", func(t *testing.T) { - los := inmemory.NewInmemoryLocalStorage() - mcs := inmemory.NewInmemoryMorphRuleChainStorage() - fid := newFrostfsIDProviderMock(t) - s := Service{ - cfg: cfg{ - frostfsidSubjectProvider: fid, - }, - apeChecker: checkercore.New(los, mcs, fid, &stMock{}), - } - - mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.QuotaLimitReached, - Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindResource, - Key: nativeschema.ProperyKeyTreeID, - Value: versionTreeID, - }, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey()) - - var chErr *checkercore.ChainRouterError - require.ErrorAs(t, err, &chErr) - require.Equal(t, chain.QuotaLimitReached, chErr.Status()) - }) - t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { los := inmemory.NewInmemoryLocalStorage() mcs := inmemory.NewInmemoryMorphRuleChainStorage() @@ -191,7 +152,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) @@ -240,7 +201,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) 
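Editor's note: the removed "treeID rule" test above keyed a `CondStringEquals` condition on the resource property published as `nativeschema.ProperyKeyTreeID` (the "Propery" misspelling is the constant's real name in this diff). Once the revert stops filling that property into the resource map, such a condition can never match. A toy, dependency-free sketch of the string-equals check — names are illustrative, not the policy engine's API:

```go
package main

import "fmt"

// condition mirrors the shape of the removed test's chain.Condition: a
// string-equals check against a single resource property.
type condition struct{ key, want string }

func matches(resProps map[string]string, c condition) bool {
	got, ok := resProps[c.key]
	return ok && got == c.want
}

func main() {
	c := condition{key: "TreeID", want: "version"} // illustrative property name

	// Pre-revert: the tree ID is published as a resource property, so it matches.
	fmt.Println(matches(map[string]string{"TreeID": "version"}, c)) // true

	// Post-revert: the property map is empty, so the rule can never fire.
	fmt.Println(matches(map[string]string{}, c)) // false
}
```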
- err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) } diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index a11700771..ac80d0e4c 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -10,9 +10,12 @@ import ( internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" + tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" ) type clientCache struct { @@ -48,7 +51,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { c.Lock() - ccInt, ok := c.Get(netmapAddr) + ccInt, ok := c.LRU.Get(netmapAddr) c.Unlock() if ok { @@ -66,19 +69,14 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl } } - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) + cc, err := c.dialTreeService(ctx, netmapAddr) lastTry := time.Now() c.Lock() if err != nil { - c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) + c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) } else { - c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) + c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) } c.Unlock() @@ -88,3 +86,48 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl return NewTreeServiceClient(cc), nil } + +func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) { + var netAddr network.Address + if err := netAddr.FromString(netmapAddr); err != nil { + return nil, err + } + + opts := []grpc.DialOption{ + grpc.WithChainUnaryInterceptor( + metrics.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInteceptor(), + ), + grpc.WithChainStreamInterceptor( + metrics.NewStreamClientInterceptor(), + tracing.NewStreamClientInterceptor(), + ), + grpc.WithContextDialer(c.ds.GrpcContextDialer()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + } + + if !netAddr.IsTLSEnabled() { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, c.key); err != nil { + return nil, err + } + + cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) 
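Editor's note: `grpc.NewClient` builds the connection lazily (no I/O at call time), which is why the added `dialTreeService` continues below with a signed `Healthcheck` round-trip under its own timeout before the connection is cached. The probe-on-dial pattern in isolation, with assumed names and an assumed timeout value:

```go
package treeclient

import (
	"context"
	"time"

	"google.golang.org/grpc"
)

// dialWithProbe mirrors dialTreeService's shape: build the lazy client
// connection, then verify liveness with one cheap RPC before handing it out.
// healthCheck stands in for the signed Healthcheck call; 10s stands in for
// defaultClientConnectTimeout.
func dialWithProbe(ctx context.Context, target string,
	healthCheck func(context.Context, *grpc.ClientConn) error,
	opts ...grpc.DialOption,
) (*grpc.ClientConn, error) {
	cc, err := grpc.NewClient(target, opts...)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// A lazy connection only surfaces dial/TLS failures on first use, so the
	// probe doubles as the connectivity check.
	if err := healthCheck(ctx, cc); err != nil {
		_ = cc.Close() // never cache a connection that failed its probe
		return nil, err
	}
	return cc, nil
}
```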
+ if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) + defer cancel() + // perform some request to check connection + if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { + _ = cc.Close() + return nil, err + } + return cc, nil +} diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go index c641a21a2..435257550 100644 --- a/pkg/services/tree/container.go +++ b/pkg/services/tree/container.go @@ -2,7 +2,6 @@ package tree import ( "bytes" - "context" "crypto/sha256" "fmt" "sync" @@ -33,13 +32,13 @@ type containerCacheItem struct { const defaultContainerCacheSize = 10 // getContainerNodes returns nodes in the container and a position of local key in the list. -func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { - nm, err := s.nmSource.GetNetMap(ctx, 0) +func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { + nm, err := s.nmSource.GetNetMap(0) if err != nil { return nil, -1, fmt.Errorf("can't get netmap: %w", err) } - cnr, err := s.cnrSource.Get(ctx, cid) + cnr, err := s.cnrSource.Get(cid) if err != nil { return nil, -1, fmt.Errorf("can't get container: %w", err) } diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go index 07503f8c3..0f0e4ee57 100644 --- a/pkg/services/tree/metrics.go +++ b/pkg/services/tree/metrics.go @@ -6,7 +6,6 @@ type MetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) - AddOperation(string, string) } type defaultMetricsRegister struct{} @@ -14,4 +13,3 @@ type defaultMetricsRegister struct{} func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {} -func (defaultMetricsRegister) AddOperation(string, string) {} diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 56cbcc081..a3f488009 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -1,9 +1,7 @@ package tree import ( - "context" "crypto/ecdsa" - "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -20,12 +18,12 @@ import ( type ContainerSource interface { container.Source - DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) + DeletionInfo(cid.ID) (*container.DelInfo, error) // List must return list of all the containers in the FrostFS network // at the moment of a call and any error that does not allow fetching // container information. - List(ctx context.Context) ([]cid.ID, error) + List() ([]cid.ID, error) } type cfg struct { @@ -42,7 +40,7 @@ type cfg struct { replicatorWorkerCount int replicatorTimeout time.Duration containerCacheSize int - authorizedKeys atomic.Pointer[[][]byte] + authorizedKeys [][]byte syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage @@ -148,7 +146,10 @@ func WithMetrics(v MetricsRegister) Option { // keys that have rights to use Tree service. 
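Editor's note: the option below reverts `authorizedKeys` from an `atomic.Pointer[[][]byte]` (hot-reloadable via the `ReloadAuthorizedKeys` method removed further down in service.go) to a plain slice fixed at construction time. The lock-free reload pattern being removed, sketched in isolation:

```go
package tree

import (
	"bytes"
	"sync/atomic"
)

// keySet sketches the pre-revert pattern: readers load the current key list
// without locks while a reload swaps the whole slice in one atomic store.
type keySet struct {
	keys atomic.Pointer[[][]byte]
}

func newKeySet() *keySet {
	s := &keySet{}
	s.keys.Store(&[][]byte{}) // never leave the pointer nil for readers
	return s
}

// Reload replaces the entire authorized set atomically.
func (s *keySet) Reload(raw [][]byte) { s.keys.Store(&raw) }

// Authorized reports whether key is in the currently loaded set.
func (s *keySet) Authorized(key []byte) bool {
	for _, k := range *s.keys.Load() {
		if bytes.Equal(k, key) {
			return true
		}
	}
	return false
}
```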
func WithAuthorizedKeys(keys keys.PublicKeys) Option { return func(c *cfg) { - c.authorizedKeys.Store(fromPublicKeys(keys)) + c.authorizedKeys = nil + for _, key := range keys { + c.authorizedKeys = append(c.authorizedKeys, key.Bytes()) + } } } diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go deleted file mode 100644 index 8f21686df..000000000 --- a/pkg/services/tree/qos.go +++ /dev/null @@ -1,101 +0,0 @@ -package tree - -import ( - "context" - - "google.golang.org/grpc" -) - -var _ TreeServiceServer = (*ioTagAdjust)(nil) - -type AdjustIOTag interface { - AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context -} - -type ioTagAdjust struct { - s TreeServiceServer - a AdjustIOTag -} - -func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer { - return &ioTagAdjust{ - s: s, - a: a, - } -} - -func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Add(ctx, req) -} - -func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.AddByPath(ctx, req) -} - -func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Apply(ctx, req) -} - -func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.GetNodeByPath(ctx, req) -} - -func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { - ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) - return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{ - sender: srv, - ServerStream: srv, - ctxF: func() context.Context { return ctx }, - }) -} - -func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { - ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) - return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{ - sender: srv, - ServerStream: srv, - ctxF: func() context.Context { return ctx }, - }) -} - -func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Healthcheck(ctx, req) -} - -func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Move(ctx, req) -} - -func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.Remove(ctx, req) -} - -func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { - ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) - return i.s.TreeList(ctx, req) -} - -type qosSend[T any] interface { - Send(T) error -} - -type qosServerWrapper[T any] struct { - grpc.ServerStream - sender qosSend[T] - ctxF func() context.Context -} - -func (w *qosServerWrapper[T]) Send(resp T) error { - return w.sender.Send(resp) -} - -func (w *qosServerWrapper[T]) Context() context.Context { - return w.ctxF() -} diff --git a/pkg/services/tree/redirect.go 
b/pkg/services/tree/redirect.go index 647f8cb30..416a0fafe 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -6,6 +6,7 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" @@ -19,8 +20,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request") func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { var resp *Resp var outErr error - err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool { - resp, outErr = callback(c, fCtx, req) + err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { + resp, outErr = callback(c, ctx, req) return true }) if err != nil { @@ -31,7 +32,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. -func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { +func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { for _, n := range cntNodes { if bytes.Equal(n.PublicKey(), s.rawPub) { return nil @@ -41,15 +42,25 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo var called bool for _, n := range cntNodes { var stop bool - for endpoint := range n.NetworkEndpoints() { - stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { - called = true - return f(fCtx, c) - }) - if called { - break + n.IterateNetworkEndpoints(func(endpoint string) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + + c, err := s.cache.get(ctx, endpoint) + if err != nil { + return false } - } + + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + + called = true + stop = f(c) + return true + }) if stop { return nil } @@ -59,19 +70,3 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo } return nil } - -func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false - } - - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) - return f(ctx, c) -} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index ee40884eb..e0085d73a 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + 
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -89,23 +90,41 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req var lastErr error var lastAddr string - for addr := range n.NetworkEndpoints() { + n.IterateNetworkEndpoints(func(addr string) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + lastAddr = addr - lastErr = s.apply(ctx, n, addr, req) - if lastErr == nil { - break + + c, err := s.cache.get(ctx, addr) + if err != nil { + lastErr = fmt.Errorf("can't create client: %w", err) + return false } - } + + ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) + _, lastErr = c.Apply(ctx, req) + cancel() + + return lastErr == nil + }) if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode, - zap.String("last_error", lastErr.Error())) + zap.String("last_error", lastErr.Error()), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("address", lastAddr), - zap.String("key", hex.EncodeToString(n.PublicKey()))) + zap.String("key", hex.EncodeToString(n.PublicKey())), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } s.metrics.AddReplicateTaskDuration(time.Since(start), false) return lastErr @@ -114,26 +133,6 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req return nil } -func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - - c, err := s.cache.get(ctx, addr) - if err != nil { - return fmt.Errorf("can't create client: %w", err) - } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, err = c.Apply(ctx, req) - cancel() - return err -} - func (s *Service) replicateLoop(ctx context.Context) { for range s.replicatorWorkerCount { go s.replicationWorker(ctx) @@ -153,7 +152,7 @@ func (s *Service) replicateLoop(ctx context.Context) { return case op := <-s.replicateCh: start := time.Now() - err := s.replicate(ctx, op) + err := s.replicate(op) if err != nil { s.log.Error(ctx, logs.TreeErrorDuringReplication, zap.Error(err), @@ -165,14 +164,14 @@ func (s *Service) replicateLoop(ctx context.Context) { } } -func (s *Service) replicate(ctx context.Context, op movePair) error { +func (s *Service) replicate(op movePair) error { req := newApplyRequest(&op) err := SignMessage(req, s.key) if err != nil { return fmt.Errorf("can't sign data: %w", err) } - nodes, localIndex, err := s.getContainerNodes(ctx, op.cid) + nodes, localIndex, err := s.getContainerNodes(op.cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -206,7 +205,7 @@ func newApplyRequest(op *movePair) *ApplyRequest { TreeId: op.treeID, Operation: &LogMove{ ParentId: op.op.Parent, - Meta: op.op.Bytes(), + Meta: op.op.Meta.Bytes(), 
ChildId: op.op.Child, }, }, diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 3994d6973..2df3c08e6 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -9,15 +9,12 @@ import ( "sync" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -60,7 +57,6 @@ func New(opts ...Option) *Service { s.replicatorTimeout = defaultReplicatorSendTimeout s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} - s.authorizedKeys.Store(&[][]byte{}) for i := range opts { opts[i](&s.cfg) @@ -87,7 +83,6 @@ func New(opts ...Option) *Service { // Start starts the service. func (s *Service) Start(ctx context.Context) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) @@ -107,7 +102,6 @@ func (s *Service) Shutdown() { } func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { - defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -119,12 +113,12 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(ctx, cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return nil, err } @@ -151,7 +145,6 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error } func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { - defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -163,12 +156,12 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(ctx, cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return nil, err } @@ -207,7 +200,6 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP } func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { - defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -219,12 +211,12 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), 
acl.OpObjectDelete) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(ctx, cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return nil, err } @@ -252,7 +244,6 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon // Move applies client operation to the specified tree and pushes in queue // for replication on other nodes. func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { - defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -264,12 +255,12 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(ctx, cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return nil, err } @@ -296,7 +287,6 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er } func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { - defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -308,12 +298,12 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(ctx, cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return nil, err } @@ -347,11 +337,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } else { var metaValue []KeyValue for _, kv := range m.Items { - if slices.Contains(b.GetAttributes(), kv.Key) { - metaValue = append(metaValue, KeyValue{ - Key: kv.Key, - Value: kv.Value, - }) + for _, attr := range b.GetAttributes() { + if kv.Key == attr { + metaValue = append(metaValue, KeyValue{ + Key: kv.Key, + Value: kv.Value, + }) + break + } } } x.Meta = metaValue @@ -367,7 +360,6 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { - defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -379,20 +371,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet) if err != nil { return err } - ns, pos, err := s.getContainerNodes(srv.Context(), cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return err } if pos < 0 { var cli TreeService_GetSubTreeClient var outErr error - err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(fCtx, req) + err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { + cli, outErr = c.GetSubTree(srv.Context(), req) return true }) if err != nil { @@ -414,7 +406,7 @@ func (s 
*Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS type stackItem struct { values []pilorama.MultiNodeInfo parent pilorama.MultiNode - last *pilorama.Cursor + last *string } func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { @@ -438,8 +430,10 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid } if ms == nil { ms = m.Items - } else if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") + } else { + if len(m.Items) != 1 { + return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") + } } ts = append(ts, m.Time) ps = append(ps, p) @@ -463,13 +457,14 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid break } - var err error - item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) + nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) if err != nil { return err } + item.values = nodes + item.last = last - if len(item.values) == 0 { + if len(nodes) == 0 { stack = stack[:len(stack)-1] continue } @@ -591,8 +586,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di } // Apply locally applies operation from the remote node to the tree. -func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { - defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx)) +func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) { err := verifyMessage(req) if err != nil { return nil, err @@ -605,7 +599,7 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, key := req.GetSignature().GetKey() - _, pos, _, err := s.getContainerInfo(ctx, cid, key) + _, pos, _, err := s.getContainerInfo(cid, key) if err != nil { return nil, err } @@ -636,7 +630,6 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, } func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { - defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -648,15 +641,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) return err } - ns, pos, err := s.getContainerNodes(srv.Context(), cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return err } if pos < 0 { var cli TreeService_GetOpLogClient var outErr error - err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(fCtx, req) + err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { + cli, outErr = c.GetOpLog(srv.Context(), req) return true }) if err != nil { @@ -687,7 +680,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) Body: &GetOpLogResponse_Body{ Operation: &LogMove{ ParentId: lm.Parent, - Meta: lm.Bytes(), + Meta: lm.Meta.Bytes(), ChildId: lm.Child, }, }, @@ -701,7 +694,6 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) } func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { - defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -721,7 
+713,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList return nil, err } - ns, pos, err := s.getContainerNodes(ctx, cid) + ns, pos, err := s.getContainerNodes(cid) if err != nil { return nil, err } @@ -763,8 +755,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue { // getContainerInfo returns the list of container nodes, position in the container for the node // with pub key and total amount of nodes in all replicas. -func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { - cntNodes, _, err := s.getContainerNodes(ctx, cid) +func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { + cntNodes, _, err := s.getContainerNodes(cid) if err != nil { return nil, 0, 0, err } @@ -784,15 +776,3 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec return new(HealthcheckResponse), nil } - -func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { - s.authorizedKeys.Store(fromPublicKeys(newKeys)) -} - -func fromPublicKeys(keys keys.PublicKeys) *[][]byte { - buff := make([][]byte, len(keys)) - for i, k := range keys { - buff[i] = k.Bytes() - } - return &buff -} diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 8221a4546..b0f00615a 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -38,7 +38,7 @@ var ( // Operation must be one of: // - 1. ObjectPut; // - 2. ObjectGet. -func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { +func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error { err := verifyMessage(req) if err != nil { return err @@ -49,7 +49,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return err } - cnr, err := s.cnrSource.Get(ctx, cid) + cnr, err := s.cnrSource.Get(cid) if err != nil { return fmt.Errorf("can't get container %s: %w", cid, err) } @@ -64,7 +64,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { + if err = s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey); err != nil { return apeErr(err) } return nil @@ -95,8 +95,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { } key := sign.GetKey() - for _, currentKey := range *s.authorizedKeys.Load() { - if bytes.Equal(currentKey, key) { + for i := range s.authorizedKeys { + if bytes.Equal(s.authorizedKeys[i], key) { return true, nil } } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 8815c227f..7bc5002dc 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -31,8 +31,6 @@ import ( "github.com/stretchr/testify/require" ) -const versionTreeID = "version" - type dummyNetmapSource struct { netmap.Source } @@ -41,7 +39,7 @@ type dummySubjectProvider struct { subjects map[util.Uint160]client.SubjectExtended } -func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { +func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) { res := s.subjects[addr] return &client.Subject{ PrimaryKey: res.PrimaryKey, @@ -52,7 +50,7 @@ func (s dummySubjectProvider) GetSubject(ctx 
context.Context, addr util.Uint160) }, nil } -func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { +func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { res := s.subjects[addr] return &res, nil } @@ -67,7 +65,7 @@ func (s dummyEpochSource) CurrentEpoch() uint64 { type dummyContainerSource map[string]*containercore.Container -func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) { +func (s dummyContainerSource) List() ([]cid.ID, error) { res := make([]cid.ID, 0, len(s)) var cnr cid.ID @@ -83,7 +81,7 @@ func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) { return res, nil } -func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) { +func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { cnt, ok := s[id.String()] if !ok { return nil, errors.New("container not found") @@ -91,7 +89,7 @@ func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercor return cnt, nil } -func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) { +func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) { return &containercore.DelInfo{}, nil } @@ -152,7 +150,6 @@ func TestMessageSign(t *testing.T) { apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } - s.cfg.authorizedKeys.Store(&[][]byte{}) rawCID1 := make([]byte, sha256.Size) cid1.Encode(rawCID1) @@ -171,26 +168,26 @@ func TestMessageSign(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRW) t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) }) require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op)) t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) }) cnr.Value.SetBasicACL(acl.Private) t.Run("extension disabled", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) }) t.Run("invalid key", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op)) }) t.Run("bearer", func(t *testing.T) { @@ -203,7 +200,7 @@ func TestMessageSign(t *testing.T) { t.Run("invalid bearer", func(t *testing.T) { req.Body.BearerToken = []byte{0xFF} require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer CID", func(t *testing.T) { @@ -212,7 +209,7 @@ func 
TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer owner", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -220,7 +217,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer signature", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -232,112 +229,20 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bv2.StableMarshal(nil) require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - - t.Run("omit override within bt", func(t *testing.T) { - t.Run("personated", func(t *testing.T) { - bt := testBearerTokenNoOverride() - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") - }) - - t.Run("impersonated", func(t *testing.T) { - bt := testBearerTokenNoOverride() - bt.SetImpersonate(true) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - }) - - t.Run("invalid override within bearer token", func(t *testing.T) { - t.Run("personated", func(t *testing.T) { - bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") - }) - - t.Run("impersonated", func(t *testing.T) { - bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) - bt.SetImpersonate(true) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") - }) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token - bt.SetExp(10) - bt.SetImpersonate(true) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - 
Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - - t.Run("impersonate, but target user is still set", func(t *testing.T) { - var bt bearer.Token - bt.SetExp(10) bt.SetImpersonate(true) - var reqSigner user.ID - user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey())) - - bt.ForUser(reqSigner) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - - t.Run("impersonate but invalid signer", func(t *testing.T) { - var bt bearer.Token - bt.SetExp(10) - bt.SetImpersonate(true) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) require.NoError(t, bt.Sign(privs[1].PrivateKey)) req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -347,18 +252,18 @@ func TestMessageSign(t *testing.T) { t.Run("put and get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("only get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, 
s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("none", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) }) } @@ -377,25 +282,6 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token return b } -func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - b.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - }, - Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, - }) - - return b -} - -func testBearerTokenNoOverride() bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - return b -} - func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { ruleGet := chain.Rule{ Status: chain.Allow, diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index af355639f..c48a312fb 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -2,9 +2,7 @@ package tree import ( "context" - "crypto/ecdsa" "crypto/sha256" - "crypto/tls" "errors" "fmt" "io" @@ -15,8 +13,6 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -24,15 +20,12 @@ import ( metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -46,7 +39,7 @@ const defaultSyncWorkerCount = 20 // tree IDs from the other container nodes. Returns ErrNotInContainer if the node // is not included in the container. 
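Editor's note: `applyOperationStream`, a few hunks below, drains the merged operation stream and flushes to `forest.TreeApplyBatch` every `syncBatchSize` items, with a final flush for the partial tail. The generic flush-every-N shape as a sketch, where `applyBatch` is a stand-in for the forest call:

```go
package tree

// applyInBatches buffers items from ops and applies them in fixed-size
// batches, flushing whatever remains once the channel closes.
func applyInBatches[T any](ops <-chan T, batchSize int, applyBatch func([]T) error) error {
	batch := make([]T, 0, batchSize)
	for op := range ops {
		batch = append(batch, op)
		if len(batch) == batchSize {
			if err := applyBatch(batch); err != nil {
				return err
			}
			batch = batch[:0] // keep the backing array, reset the length
		}
	}
	if len(batch) > 0 {
		return applyBatch(batch) // tail flush
	}
	return nil
}
```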
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { - nodes, pos, err := s.getContainerNodes(ctx, cid) + nodes, pos, err := s.getContainerNodes(cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -78,8 +71,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { var treesToSync []string var outErr error - err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { - resp, outErr = c.TreeList(fCtx, req) + err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool { + resp, outErr = c.TreeList(ctx, req) if outErr != nil { return false } @@ -119,7 +112,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { // SynchronizeTree tries to synchronize log starting from the last stored height. func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error { - nodes, pos, err := s.getContainerNodes(ctx, cid) + nodes, pos, err := s.getContainerNodes(cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -138,9 +131,14 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string } // mergeOperationStreams performs merge sort for node operation streams to one stream. -func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { +func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { defer close(merged) + ms := make([]*pilorama.Move, len(streams)) + for i := range streams { + ms[i] = <-streams[i] + } + // Merging different node streams shuffles incoming operations like that: // // x - operation from the stream A @@ -152,15 +150,6 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m // operation height from the stream B. This height is stored in minStreamedLastHeight. 
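Editor's note: `mergeOperationStreams` is a k-way merge — it holds the head of every per-node stream, repeatedly forwards the head with the smallest operation time, and refills from the stream it came from. The same loop in isolation over plain integers, standing in for `*pilorama.Move` values ordered by `Time`:

```go
package tree

// mergeSorted merges already-sorted channels into one sorted output, the
// shape of mergeOperationStreams. A drained stream drops out via a nil head.
func mergeSorted(streams []<-chan uint64, merged chan<- uint64) {
	defer close(merged)

	// Prime one head per stream, mirroring the ms slice in the diff.
	heads := make([]*uint64, len(streams))
	for i := range streams {
		if v, ok := <-streams[i]; ok {
			heads[i] = &v
		}
	}
	for {
		// Pick the smallest available head.
		minIdx := -1
		for i, h := range heads {
			if h != nil && (minIdx == -1 || *h < *heads[minIdx]) {
				minIdx = i
			}
		}
		if minIdx == -1 {
			return // every stream is drained
		}
		merged <- *heads[minIdx]
		// Refill from the stream the emitted value came from.
		if v, ok := <-streams[minIdx]; ok {
			heads[minIdx] = &v
		} else {
			heads[minIdx] = nil
		}
	}
}
```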
var minStreamedLastHeight uint64 = math.MaxUint64 - ms := make([]*pilorama.Move, len(streams)) - for i := range streams { - select { - case ms[i] = <-streams[i]: - case <-ctx.Done(): - return minStreamedLastHeight - } - } - for { var minTimeMoveTime uint64 = math.MaxUint64 minTimeMoveIndex := -1 @@ -175,11 +164,7 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m break } - select { - case merged <- ms[minTimeMoveIndex]: - case <-ctx.Done(): - return minStreamedLastHeight - } + merged <- ms[minTimeMoveIndex] height := ms[minTimeMoveIndex].Time if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil { minStreamedLastHeight = min(minStreamedLastHeight, height) @@ -191,7 +176,7 @@ func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, m func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, operationStream <-chan *pilorama.Move, -) (uint64, error) { +) uint64 { var prev *pilorama.Move var batch []*pilorama.Move for m := range operationStream { @@ -204,17 +189,17 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s if len(batch) == s.syncBatchSize { if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time, err + return batch[0].Time } batch = batch[:0] } } if len(batch) > 0 { if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time, err + return batch[0].Time } } - return math.MaxUint64, nil + return math.MaxUint64 } func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, @@ -247,14 +232,10 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, Parent: lm.GetParentId(), Child: lm.GetChildId(), } - if err := m.FromBytes(lm.GetMeta()); err != nil { + if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { return err } - select { - case opsCh <- m: - case <-ctx.Done(): - return ctx.Err() - } + opsCh <- m } if !errors.Is(err, io.EOF) { return err @@ -283,14 +264,13 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, merged := make(chan *pilorama.Move) var minStreamedLastHeight uint64 errGroup.Go(func() error { - minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged) + minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged) return nil }) var minUnappliedHeight uint64 errGroup.Go(func() error { - var err error - minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged) - return err + minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged) + return nil }) var allNodesSynced atomic.Bool @@ -299,27 +279,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, for i, n := range nodes { errGroup.Go(func() error { var nodeSynced bool - for addr := range n.NetworkEndpoints() { + n.IterateNetworkEndpoints(func(addr string) bool { var a network.Address if err := a.FromString(addr); err != nil { s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - continue + return false } - cc, err := dialTreeService(ctx, a, s.key, s.ds) + cc, err := s.createConnection(a) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - continue + return false } + defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { s.log.Warn(ctx, 
logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil - _ = cc.Close() - break - } + return true + }) close(nodeOperationStreams[i]) if !nodeSynced { allNodesSynced.Store(false) @@ -344,60 +324,19 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { - cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - defer cancel() - - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, key); err != nil { - return nil, err - } - - // perform some request to check connection - if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { - _ = cc.Close() - return nil, err - } - return cc, nil -} - -func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - host, isTLS, err := client.ParseURI(a.URIAddr()) - if err != nil { - return nil, err - } - - creds := insecure.NewCredentials() - if isTLS { - creds = credentials.NewTLS(&tls.Config{}) - } - - defaultOpts := []grpc.DialOption{ +func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { + return grpc.NewClient(a.URIAddr(), grpc.WithChainUnaryInterceptor( - qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), + tracing_grpc.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( - qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), - tagging.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(creds), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - grpc.WithDisableServiceConfig(), - } - - return grpc.NewClient(host, append(defaultOpts, opts...)...) + ) } // ErrAlreadySyncing is returned when a service synchronization has already @@ -441,7 +380,7 @@ func (s *Service) syncLoop(ctx context.Context) { start := time.Now() - cnrs, err := s.cnrSource.List(ctx) + cnrs, err := s.cfg.cnrSource.List() if err != nil { s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) @@ -511,7 +450,7 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID continue } - existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr) + existed, err := containerCore.WasRemoved(s.cnrSource, cnr) if err != nil { s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted, zap.Stringer("cid", cnr), @@ -541,7 +480,7 @@ func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid. 
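For context on the connection hunk above: the restored createConnection assembles the tree-sync client purely from dial options, while the deleted dialTreeService had additionally parsed the URI for TLS and probed the peer with a signed Healthcheck before use. A rough standalone sketch of the option-based construction; logUnary is an illustrative interceptor, not frostfs-node code:

package sketch

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// logUnary is a stand-in for the metrics/tracing interceptors chained in
// the hunk; it logs every unary call after delegating to the next invoker.
func logUnary(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
) error {
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("call %s: err=%v", method, err)
	return err
}

func dial(target string) (*grpc.ClientConn, error) {
	// grpc.NewClient does not connect eagerly; WaitForReady makes RPCs
	// block until the channel becomes ready instead of failing fast.
	return grpc.NewClient(target,
		grpc.WithChainUnaryInterceptor(logUnary),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
}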
cnrsToSync := make([]cid.ID, 0, len(cnrs)) for _, cnr := range cnrs { - _, pos, err := s.getContainerNodes(ctx, cnr) + _, pos, err := s.getContainerNodes(cnr) if err != nil { s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go index 87d419408..497d90554 100644 --- a/pkg/services/tree/sync_test.go +++ b/pkg/services/tree/sync_test.go @@ -1,7 +1,6 @@ package tree import ( - "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -65,7 +64,7 @@ func Test_mergeOperationStreams(t *testing.T) { merged := make(chan *pilorama.Move, 1) min := make(chan uint64) go func() { - min <- mergeOperationStreams(context.Background(), nodeOpChans, merged) + min <- mergeOperationStreams(nodeOpChans, merged) }() var res []uint64 diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go index 6f114d45b..b4a31fd8d 100644 --- a/pkg/util/ape/parser.go +++ b/pkg/util/ape/parser.go @@ -174,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) { case "deny": if !found { return apechain.AccessDenied, nil - } - if strings.EqualFold(expression, "QuotaLimitReached") { + } else if strings.EqualFold(expression, "QuotaLimitReached") { return apechain.QuotaLimitReached, nil + } else { + return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) } - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) case "allow": if found { return 0, errUnknownStatusDetail @@ -261,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) { } else { if lexeme == "*" { return nativeschema.ResourceFormatAllContainers, nil - } else if lexeme == "/*" || lexeme == "root/*" { + } else if lexeme == "/*" { return nativeschema.ResourceFormatRootContainers, nil } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 { lexeme = lexeme[1:] diff --git a/pkg/util/ape/parser_test.go b/pkg/util/ape/parser_test.go index c236c4603..21649fd24 100644 --- a/pkg/util/ape/parser_test.go +++ b/pkg/util/ape/parser_test.go @@ -43,15 +43,6 @@ func TestParseAPERule(t *testing.T) { Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, }, }, - { - name: "Valid rule for all containers in explicit root namespace", - rule: "allow Container.Put root/*", - expectRule: policyengine.Rule{ - Status: policyengine.Allow, - Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}}, - Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}}, - }, - }, { name: "Valid rule for all objects in root namespace and container", rule: "allow Object.Put /cid/*", diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go index 66581878a..547c8d50b 100644 --- a/pkg/util/attributes/parser_test.go +++ b/pkg/util/attributes/parser_test.go @@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) { mExp = mSrc } - for key, value := range node.Attributes() { + node.IterateAttributes(func(key, value string) { v, ok := mExp[key] require.True(t, ok) require.Equal(t, value, v) delete(mExp, key) - } + }) require.Empty(t, mExp) } diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go index 2589ab786..923412a7f 100644 --- a/pkg/util/http/server.go +++ b/pkg/util/http/server.go @@ -76,7 +76,8 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server { o(c) } - if c.shutdownTimeout <= 0 { + switch { + case c.shutdownTimeout <= 0: 
panicOnOptValue("shutdown timeout", c.shutdownTimeout) } diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go index 6337039a9..b2942b52a 100644 --- a/pkg/util/keyer/dashboard.go +++ b/pkg/util/keyer/dashboard.go @@ -6,7 +6,6 @@ import ( "os" "text/tabwriter" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -105,7 +104,9 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { func base58ToHex(data string) string { val, err := base58.Decode(data) - assert.NoError(err, "produced incorrect base58 value") + if err != nil { + panic("produced incorrect base58 value") + } return hex.EncodeToString(val) } diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go index 413b1d9aa..269e07d90 100644 --- a/pkg/util/logger/log.go +++ b/pkg/util/logger/log.go @@ -4,32 +4,37 @@ import ( "context" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" ) func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Debug(msg, appendContext(ctx, fields...)...) + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Debug(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Debug(msg, fields...) } func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Info(msg, appendContext(ctx, fields...)...) + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Info(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Info(msg, fields...) } func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Warn(msg, appendContext(ctx, fields...)...) + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Warn(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Warn(msg, fields...) } func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { - l.z.Error(msg, appendContext(ctx, fields...)...) -} - -func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field { if traceID := tracing.GetTraceID(ctx); traceID != "" { - fields = append(fields, zap.String("trace_id", traceID)) + l.z.Error(msg, append(fields, zap.String("trace_id", traceID))...) + return } - if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined { - fields = append(fields, zap.String("io_tag", ioTag)) - } - return fields + l.z.Error(msg, fields...) } diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index a1998cb1a..19d3f1ed1 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -2,7 +2,6 @@ package logger import ( "fmt" - "time" "git.frostfs.info/TrueCloudLab/zapjournald" "github.com/ssgreg/journald" @@ -13,10 +12,8 @@ import ( // Logger represents a component // for writing messages to log. type Logger struct { - z *zap.Logger - c zapcore.Core - t Tag - w bool + z *zap.Logger + lvl zap.AtomicLevel } // Prm groups Logger's parameters. @@ -25,8 +22,16 @@ type Logger struct { // Parameters that have been connected to the Logger support its // configuration changing. // -// See also Logger.Reload, SetLevelString. +// Passing Prm after a successful connection via the NewLogger, connects +// the Prm to a new instance of the Logger. +// +// See also Reload, SetLevelString. 
type Prm struct { + // link to the created Logger + // instance; used for a runtime + // reconfiguration + _log *Logger + // support runtime rereading level zapcore.Level @@ -38,12 +43,6 @@ type Prm struct { // PrependTimestamp specifies whether to prepend a timestamp in the log PrependTimestamp bool - - // Options for zap.Logger - Options []zap.Option - - // map of tag's bit masks to log level, overrides lvl - tl map[Tag]zapcore.Level } const ( @@ -73,10 +72,20 @@ func (p *Prm) SetDestination(d string) error { return nil } -// SetTags parses list of tags with log level. -func (p *Prm) SetTags(tags [][]string) (err error) { - p.tl, err = parseTags(tags) - return err +// Reload reloads configuration of a connected instance of the Logger. +// Returns ErrLoggerNotConnected if no connection has been performed. +// Returns any reconfiguration error from the Logger directly. +func (p Prm) Reload() error { + if p._log == nil { + // incorrect logger usage + panic("parameters are not connected to any Logger") + } + + return p._log.reload(p) +} + +func defaultPrm() *Prm { + return new(Prm) } // NewLogger constructs a new zap logger instance. Constructing with nil @@ -90,7 +99,10 @@ func (p *Prm) SetTags(tags [][]string) (err error) { // - ISO8601 time encoding. // // Logger records a stack trace for all messages at or above fatal level. -func NewLogger(prm Prm) (*Logger, error) { +func NewLogger(prm *Prm) (*Logger, error) { + if prm == nil { + prm = defaultPrm() + } switch prm.dest { case DestinationUndefined, DestinationStdout: return newConsoleLogger(prm) @@ -101,9 +113,11 @@ func NewLogger(prm Prm) (*Logger, error) { } } -func newConsoleLogger(prm Prm) (*Logger, error) { +func newConsoleLogger(prm *Prm) (*Logger, error) { + lvl := zap.NewAtomicLevelAt(prm.level) + c := zap.NewProductionConfig() - c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + c.Level = lvl c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -115,23 +129,26 @@ func newConsoleLogger(prm Prm) (*Logger, error) { c.EncoderConfig.TimeKey = "" } - opts := []zap.Option{ + lZap, err := c.Build( zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1), - } - opts = append(opts, prm.Options...) - lZap, err := c.Build(opts...) 
+ ) if err != nil { return nil, err } - l := &Logger{z: lZap, c: lZap.Core()} - l = l.WithTag(TagMain) + + l := &Logger{z: lZap, lvl: lvl} + prm._log = l return l, nil } -func newJournaldLogger(prm Prm) (*Logger, error) { +func newJournaldLogger(prm *Prm) (*Logger, error) { + lvl := zap.NewAtomicLevelAt(prm.level) + c := zap.NewProductionConfig() + c.Level = lvl + c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook } @@ -144,100 +161,36 @@ func newJournaldLogger(prm Prm) (*Logger, error) { encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields) - core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields) + core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields) coreWithContext := core.With([]zapcore.Field{ zapjournald.SyslogFacility(zapjournald.LogDaemon), zapjournald.SyslogIdentifier(), zapjournald.SyslogPid(), }) - var samplerOpts []zapcore.SamplerOption - if c.Sampling.Hook != nil { - samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook)) - } - samplingCore := zapcore.NewSamplerWithOptions( - coreWithContext, - time.Second, - c.Sampling.Initial, - c.Sampling.Thereafter, - samplerOpts..., - ) - opts := []zap.Option{ - zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), - zap.AddCallerSkip(1), - } - opts = append(opts, prm.Options...) - lZap := zap.New(samplingCore, opts...) - l := &Logger{z: lZap, c: lZap.Core()} - l = l.WithTag(TagMain) + lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) + + l := &Logger{z: lZap, lvl: lvl} + prm._log = l return l, nil } -// With create a child logger with new fields, don't affect the parent. -// Throws panic if tag is unset. +func (l *Logger) reload(prm Prm) error { + l.lvl.SetLevel(prm.level) + return nil +} + +func (l *Logger) WithOptions(options ...zap.Option) { + l.z = l.z.WithOptions(options...) +} + func (l *Logger) With(fields ...zap.Field) *Logger { - if l.t == 0 { - panic("tag is unset") - } - c := *l - c.z = l.z.With(fields...) - // With called under the logger - c.w = true - return &c -} - -type core struct { - c zapcore.Core - l zap.AtomicLevel -} - -func (c *core) Enabled(lvl zapcore.Level) bool { - return c.l.Enabled(lvl) -} - -func (c *core) With(fields []zapcore.Field) zapcore.Core { - clone := *c - clone.c = clone.c.With(fields) - return &clone -} - -func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - return c.c.Check(e, ce) -} - -func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error { - return c.c.Write(e, fields) -} - -func (c *core) Sync() error { - return c.c.Sync() -} - -// WithTag is an equivalent of calling [NewLogger] with the same parameters for the current logger. -// Throws panic if provided unsupported tag. 
-func (l *Logger) WithTag(tag Tag) *Logger { - if tag == 0 || tag > Tag(len(_Tag_index)-1) { - panic("unsupported tag " + tag.String()) - } - if l.w { - panic("unsupported operation for the logger's state") - } - c := *l - c.t = tag - c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { - return &core{ - c: l.c.With([]zap.Field{zap.String("tag", tag.String())}), - l: tagToLogLevel[tag], - } - })) - return &c + return &Logger{z: l.z.With(fields...)} } func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ z: z.WithOptions(zap.AddCallerSkip(1)), - t: TagMain, - c: z.Core(), } } diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go deleted file mode 100644 index b867ee6cc..000000000 --- a/pkg/util/logger/logger_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package logger - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest/observer" -) - -func BenchmarkLogger(b *testing.B) { - ctx := context.Background() - m := map[string]Prm{} - - prm := Prm{} - require.NoError(b, prm.SetLevelString("debug")) - m["logging enabled"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("error")) - m["logging disabled"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("error")) - require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}})) - m["logging enabled via tags"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("debug")) - require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}})) - m["logging disabled via tags"] = prm - - for k, v := range m { - b.Run(k, func(b *testing.B) { - logger, err := createLogger(v) - require.NoError(b, err) - UpdateLevelForTags(v) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - logger.Info(ctx, "test info") - } - }) - } -} - -type testCore struct { - core zapcore.Core -} - -func (c *testCore) Enabled(lvl zapcore.Level) bool { - return c.core.Enabled(lvl) -} - -func (c *testCore) With(fields []zapcore.Field) zapcore.Core { - c.core = c.core.With(fields) - return c -} - -func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - return ce.AddCore(e, c) -} - -func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error { - return nil -} - -func (c *testCore) Sync() error { - return c.core.Sync() -} - -func createLogger(prm Prm) (*Logger, error) { - prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { - tc := testCore{core: core} - return &tc - })} - return NewLogger(prm) -} - -func TestLoggerOutput(t *testing.T) { - obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel)) - - prm := Prm{} - require.NoError(t, prm.SetLevelString("debug")) - prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core { - return obs - })} - loggerMain, err := NewLogger(prm) - require.NoError(t, err) - UpdateLevelForTags(prm) - - loggerMainWith := loggerMain.With(zap.String("key", "value")) - - require.Panics(t, func() { - loggerMainWith.WithTag(TagShard) - }) - loggerShard := loggerMain.WithTag(TagShard) - loggerShard = loggerShard.With(zap.String("key1", "value1")) - - loggerMorph := loggerMain.WithTag(TagMorph) - loggerMorph = loggerMorph.With(zap.String("key2", "value2")) - - ctx := context.Background() - loggerMain.Debug(ctx, "main") - loggerMainWith.Debug(ctx, "main with") - loggerShard.Debug(ctx, "shard") - loggerMorph.Debug(ctx, "morph") - - require.Len(t, logs.All(), 4) - 
require.Len(t, logs.FilterFieldKey("key").All(), 1) - require.Len(t, logs.FilterFieldKey("key1").All(), 1) - require.Len(t, logs.FilterFieldKey("key2").All(), 1) - require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2) - require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1) - require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1) -} diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result deleted file mode 100644 index 612fa2967..000000000 --- a/pkg/util/logger/logger_test.result +++ /dev/null @@ -1,46 +0,0 @@ -goos: linux -goarch: amd64 -pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger -cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz -BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op 
-BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op -PASS -ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go deleted file mode 100644 index 1b98f2e62..000000000 --- a/pkg/util/logger/tag_string.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT. - -package logger - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[TagMain-1] - _ = x[TagMorph-2] - _ = x[TagGrpcSvc-3] - _ = x[TagIr-4] - _ = x[TagProcessor-5] - _ = x[TagEngine-6] - _ = x[TagBlobovnicza-7] - _ = x[TagBlobovniczaTree-8] - _ = x[TagBlobstor-9] - _ = x[TagFSTree-10] - _ = x[TagGC-11] - _ = x[TagShard-12] - _ = x[TagWriteCache-13] - _ = x[TagDeleteSvc-14] - _ = x[TagGetSvc-15] - _ = x[TagSearchSvc-16] - _ = x[TagSessionSvc-17] - _ = x[TagTreeSvc-18] - _ = x[TagPolicer-19] - _ = x[TagReplicator-20] -} - -const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator" - -var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148} - -func (i Tag) String() string { - i -= 1 - if i >= Tag(len(_Tag_index)-1) { - return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _Tag_name[_Tag_index[i]:_Tag_index[i+1]] -} diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go deleted file mode 100644 index a5386707e..000000000 --- a/pkg/util/logger/tags.go +++ /dev/null @@ -1,94 +0,0 @@ -package logger - -import ( - "fmt" - "strings" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -//go:generate stringer -type Tag -linecomment - -type Tag uint8 - -const ( - _ Tag = iota // - TagMain // main - TagMorph // morph - TagGrpcSvc // grpcsvc - TagIr // ir - TagProcessor // processor - TagEngine // engine - TagBlobovnicza // blobovnicza - TagBlobovniczaTree // blobovniczatree - TagBlobstor // blobstor - TagFSTree // fstree - TagGC // gc - TagShard // shard - TagWriteCache // writecache - TagDeleteSvc // deletesvc - TagGetSvc // getsvc - TagSearchSvc // searchsvc - TagSessionSvc // sessionsvc - TagTreeSvc // treesvc - TagPolicer // policer - TagReplicator // replicator - - defaultLevel = zapcore.InfoLevel -) - -var ( - tagToLogLevel = map[Tag]zap.AtomicLevel{} - stringToTag = map[string]Tag{} -) - -func init() { - for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ { - tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel) - stringToTag[i.String()] = i - } -} - -// parseTags returns: -// - map(always instantiated) of tag to custom log level for that tag; -// - error if it occurred(map is empty). -func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) { - m := make(map[Tag]zapcore.Level) - if len(raw) == 0 { - return m, nil - } - for _, item := range raw { - str, level := item[0], item[1] - if len(level) == 0 { - // It is not necessary to parse tags without level, - // because default log level will be used. 
- continue - } - var l zapcore.Level - err := l.UnmarshalText([]byte(level)) - if err != nil { - return nil, err - } - tmp := strings.Split(str, ",") - for _, tagStr := range tmp { - tag, ok := stringToTag[strings.TrimSpace(tagStr)] - if !ok { - return nil, fmt.Errorf("unsupported tag %s", str) - } - m[tag] = l - } - } - return m, nil -} - -func UpdateLevelForTags(prm Prm) { - for k, v := range tagToLogLevel { - nk, ok := prm.tl[k] - if ok { - v.SetLevel(nk) - } else { - v.SetLevel(prm.level) - } - } -} diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go deleted file mode 100644 index 7373e538f..000000000 --- a/pkg/util/testing/netmap_source.go +++ /dev/null @@ -1,36 +0,0 @@ -package testing - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -var ( - errInvalidDiff = errors.New("invalid diff") - errNetmapNotFound = errors.New("netmap not found") -) - -type TestNetmapSource struct { - Netmaps map[uint64]*netmap.NetMap - CurrentEpoch uint64 -} - -func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - if diff >= s.CurrentEpoch { - return nil, errInvalidDiff - } - return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff) -} - -func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.Netmaps[epoch]; found { - return nm, nil - } - return nil, errNetmapNotFound -} - -func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) { - return s.CurrentEpoch, nil -} diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go index 39a420358..f2f8881cf 100644 --- a/scripts/populate-metabase/internal/generate.go +++ b/scripts/populate-metabase/internal/generate.go @@ -1,10 +1,8 @@ package internal import ( - cryptorand "crypto/rand" "crypto/sha256" "fmt" - "math/rand" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -16,13 +14,14 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" + "golang.org/x/exp/rand" ) func GeneratePayloadPool(count uint, size uint) [][]byte { var pool [][]byte for range count { payload := make([]byte, size) - _, _ = cryptorand.Read(payload) + _, _ = rand.Read(payload) pool = append(pool, payload) } diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go index fafe61eaa..4da23a295 100644 --- a/scripts/populate-metabase/internal/populate.go +++ b/scripts/populate-metabase/internal/populate.go @@ -31,10 +31,13 @@ func PopulateWithObjects( for range count { obj := factory() - id := fmt.Appendf(nil, "%c/%c/%c", + + id := []byte(fmt.Sprintf( + "%c/%c/%c", digits[rand.Int()%len(digits)], digits[rand.Int()%len(digits)], - digits[rand.Int()%len(digits)]) + digits[rand.Int()%len(digits)], + )) prm := meta.PutPrm{} prm.SetObject(obj)
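A closing note on the generate.go hunk: both variants fill test payloads with a single Read call, but crypto/rand draws from the OS CSPRNG, whereas golang.org/x/exp/rand is a seedable PRNG, so the latter can make populate runs reproducible; cryptographic quality is not needed for synthetic payloads either way. A small standard-library sketch of the pool builder (names and error wrapping are illustrative):

package sketch

import (
	"crypto/rand"
	"fmt"
)

// payloadPool allocates count buffers of size bytes and fills each from
// the OS random source, mirroring GeneratePayloadPool above.
func payloadPool(count, size int) ([][]byte, error) {
	pool := make([][]byte, 0, count)
	for range count { // range-over-int requires Go 1.22+
		p := make([]byte, size)
		if _, err := rand.Read(p); err != nil {
			return nil, fmt.Errorf("fill payload: %w", err)
		}
		pool = append(pool, p)
	}
	return pool, nil
}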