diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile new file mode 100644 index 000000000..4234de160 --- /dev/null +++ b/.ci/Jenkinsfile @@ -0,0 +1,81 @@ +def golang = ['1.23', '1.24'] +def golangDefault = "golang:${golang.last()}" + +async { + + for (version in golang) { + def go = version + + task("test/go${go}") { + container("golang:${go}") { + sh 'make test' + } + } + + task("build/go${go}") { + container("golang:${go}") { + for (app in ['cli', 'node', 'ir', 'adm', 'lens']) { + sh """ + make bin/frostfs-${app} + bin/frostfs-${app} --version + """ + } + } + } + } + + task('test/race') { + container(golangDefault) { + sh 'make test GOFLAGS="-count=1 -race"' + } + } + + task('lint') { + container(golangDefault) { + sh 'make lint-install lint' + } + } + + task('staticcheck') { + container(golangDefault) { + sh 'make staticcheck-install staticcheck-run' + } + } + + task('gopls') { + container(golangDefault) { + sh 'make gopls-install gopls-run' + } + } + + task('gofumpt') { + container(golangDefault) { + sh ''' + make fumpt-install + make fumpt + git diff --exit-code --quiet + ''' + } + } + + task('vulncheck') { + container(golangDefault) { + sh ''' + go install golang.org/x/vuln/cmd/govulncheck@latest + govulncheck ./... + ''' + } + } + + task('pre-commit') { + dockerfile(""" + FROM ${golangDefault} + RUN apt update && \ + apt install -y --no-install-recommends pre-commit + """) { + withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { + sh 'pre-commit run --color=always --hook-stage=manual --all-files' + } + } + } +} diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml index 9129d136e..d568b9607 100644 --- a/.forgejo/workflows/build.yml +++ b/.forgejo/workflows/build.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] steps: - uses: actions/checkout@v3 diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml index 7c5af8410..190d7764a 100644 --- a/.forgejo/workflows/dco.yml +++ b/.forgejo/workflows/dco.yml @@ -13,7 +13,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' - name: Run commit format checker uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml new file mode 100644 index 000000000..fe91d65f9 --- /dev/null +++ b/.forgejo/workflows/oci-image.yml @@ -0,0 +1,28 @@ +name: OCI image + +on: + push: + workflow_dispatch: + +jobs: + image: + name: Build container images + runs-on: docker + container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm + steps: + - name: Clone git repo + uses: actions/checkout@v3 + + - name: Build OCI image + run: make images + + - name: Push image to OCI registry + run: | + echo "$REGISTRY_PASSWORD" \ + | docker login --username truecloudlab --password-stdin git.frostfs.info + make push-images + if: >- + startsWith(github.ref, 'refs/tags/v') && + (github.event_name == 'workflow_dispatch' || github.event_name == 'push') + env: + REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}} diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml index b27e7a39a..c2e293175 100644 --- a/.forgejo/workflows/pre-commit.yml +++ b/.forgejo/workflows/pre-commit.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.23 + go-version: 1.24 - name: Set up Python run: | apt update diff --git a/.forgejo/workflows/tests.yml 
b/.forgejo/workflows/tests.yml index 4f1bebe61..f3f5432ce 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install linters @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] fail-fast: false steps: - uses: actions/checkout@v3 @@ -53,7 +53,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' cache: true - name: Run tests @@ -68,7 +68,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install staticcheck @@ -104,7 +104,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install gofumpt diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index cf15005b1..bc94792d8 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' + check-latest: true - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.golangci.yml b/.golangci.yml index 57e3b4494..e3ec09f60 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,93 +1,107 @@ -# This file contains all available configuration options -# with their default values. - -# options for analysis running +version: "2" run: - # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 20m - - # include test files or not, default is true tests: false - -# output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" formats: - - format: tab - -# all available settings of specific linters -linters-settings: - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: true - govet: - # report about shadowed variables - check-shadowing: false - staticcheck: - checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. 
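The old config above suppressed SA1019 (`checks: ["all", "-SA1019"]`) with a TODO to re-enable it once deprecation warnings were fixed; the v2 config that follows drops that suppression and leaves only `-QF1002` disabled. For context, a minimal sketch (not part of the patch) of what SA1019 reports: `strings.Title` has been deprecated since Go 1.18, and the `golang.org/x/text/cases` call is the usual replacement. Both the example and its `x/text` dependency are assumptions of this sketch.

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// staticcheck (and the repo's `make staticcheck-run`) reports the next
	// line as SA1019: strings.Title is deprecated.
	fmt.Println(strings.Title("frostfs node"))
	// The suggested replacement is quiet under SA1019.
	fmt.Println(cases.Title(language.English).String("frostfs node"))
}
```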
- funlen: - lines: 80 # default 60 - statements: 60 # default 40 - gocognit: - min-complexity: 40 # default 30 - importas: - no-unaliased: true - no-extra-aliases: false - alias: - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - unused: - field-writes-are-uses: false - exported-fields-are-used: false - local-variables-are-used: false - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - target-methods : ["reportFlushError", "reportError"] - disable-packages: ["codes", "err", "res","exec"] - constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - + tab: + path: stdout + colors: false linters: + default: none enable: - # mandatory linters - - govet - - revive - - # some default golangci-lint linters - - errcheck - - gosimple - - godot - - ineffassign - - staticcheck - - typecheck - - unused - - # extra linters - bidichk - - durationcheck - - exhaustive - - copyloopvar - - gofmt - - goimports - - misspell - - predeclared - - reassign - - whitespace - containedctx + - contextcheck + - copyloopvar + - durationcheck + - errcheck + - exhaustive - funlen - gocognit + - gocritic + - godot - importas - - truecloudlab-linters - - perfsprint - - testifylint - - protogetter + - ineffassign - intrange - - tenv - disable-all: true - fast: false + - misspell + - perfsprint + - predeclared + - protogetter + - reassign + - revive + - staticcheck + - testifylint + - truecloudlab-linters + - unconvert + - unparam + - unused + - usetesting + - whitespace + settings: + exhaustive: + default-signifies-exhaustive: true + funlen: + lines: 80 + statements: 60 + gocognit: + min-complexity: 40 + gocritic: + disabled-checks: + - ifElseChain + importas: + alias: + - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object + alias: objectSDK + no-unaliased: true + no-extra-aliases: false + staticcheck: + checks: + - all + - -QF1002 + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false + custom: + truecloudlab-linters: + path: bin/linters/external_linters.so + original-url: git.frostfs.info/TrueCloudLab/linters.git + settings: + noliteral: + constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs + disable-packages: + - codes + - err + - res + - exec + target-methods: + - reportFlushError + - reportError + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - goimports + settings: + gci: + sections: + - standard + - default + custom-order: true + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/CHANGELOG.md b/CHANGELOG.md index e4ba6a5d6..92c84ab16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,30 @@ Changelog for FrostFS Node ### Removed ### Updated +## [v0.44.0] - 2024-11-25 - Rongbuk + +### Added +- Allow prioritizing nodes during GET traversal via attributes (#1439) +- Add metrics for the frostfsid cache (#1464) +- Customize constant attributes attached to every tracing span (#1488) +- Manage additional keys in the `frostfsid` contract (#1505) +- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519) + +### Changed +- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396) +- Print address in
base58 format in `frostfs-adm morph policy set-admin` (#1515) + +### Fixed +- Fix EC object search (#1408) +- Fix EC object put when one of the nodes is unavailable (#1427) + +### Removed +- Drop most of the eACL-related code (#1425) +- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483) + +### Upgrading from v0.43.0 +The metabase schema has changed completely, resync is required. + ## [v0.42.0] ### Added diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..d19c96a5c --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers +.forgejo/.* @potyarkin +Makefile @potyarkin diff --git a/Makefile b/Makefile index ecac760e9..575eaae6f 100755 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ #!/usr/bin/make -f SHELL = bash +.SHELLFLAGS = -euo pipefail -c REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") @@ -7,16 +8,16 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" -GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.62.0 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 +GO_VERSION ?= 1.23 +LINT_VERSION ?= 2.0.2 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 ifeq ($(shell uname), Linux) PROTOC_OS_VERSION=linux-x86_64 endif -STATICCHECK_VERSION ?= 2024.1.1 +STATICCHECK_VERSION ?= 2025.1.1 ARCH = amd64 BIN = bin @@ -42,7 +43,7 @@ GOFUMPT_VERSION ?= v0.7.0 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) -GOPLS_VERSION ?= v0.15.1 +GOPLS_VERSION ?= v0.17.1 GOPLS_DIR ?= $(abspath $(BIN))/gopls GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION) GOPLS_TEMP_FILE := $(shell mktemp) @@ -115,7 +116,7 @@ protoc: # Install protoc protoc-install: @rm -rf $(PROTOBUF_DIR) - @mkdir $(PROTOBUF_DIR) + @mkdir -p $(PROTOBUF_DIR) @echo "⇒ Installing protoc... " @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @@ -139,6 +140,15 @@ images: image-storage image-ir image-cli image-adm # Build dirty local Docker images dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm +# Push FrostFS components' docker image to the registry +push-image-%: + @echo "⇒ Publish FrostFS $* docker image " + @docker push $(HUB_IMAGE)-$*:$(HUB_TAG) + +# Push all Docker images to the registry +.PHONY: push-images +push-images: push-image-storage push-image-ir push-image-cli push-image-adm + # Run `make %` in Golang container docker/%: docker run --rm -t \ @@ -160,7 +170,7 @@ imports: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir $(GOFUMPT_DIR) + @mkdir -p $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt @@ -177,21 +187,44 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... 
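The Jenkinsfile's `test/race` task above runs `make test GOFLAGS="-count=1 -race"` on top of the plain `test` target. A minimal, hypothetical test (not part of the patch) showing the class of bug that task exists to catch: the unsynchronized variant of this counter fails under `-race`, while the mutex-guarded one passes. The `for range 100` form assumes the Go 1.22+ toolchain this patch already requires.

```go
package main

import (
	"sync"
	"testing"
)

// Run with: go test -count=1 -race
func TestCounterRace(t *testing.T) {
	var (
		mu sync.Mutex
		n  int
	)
	var wg sync.WaitGroup
	for range 100 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock() // drop the locking to see `go test -race` report a data race
			n++
			mu.Unlock()
		}()
	}
	wg.Wait()
	if n != 100 {
		t.Fatalf("got %d, want 100", n)
	}
}
```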
+# Install Gerrit commit-msg hook +review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks +review-install: + @git config remote.review.url \ + || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node + @mkdir -p $(GIT_HOOK_DIR)/ + @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg + @chmod +x $(GIT_HOOK_DIR)/commit-msg + @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg + @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg + +# Create a PR in Gerrit +review: BRANCH ?= master +review: + @git push review HEAD:refs/for/$(BRANCH) \ + --push-option r=e.stratonikov@yadro.com \ + --push-option r=d.stepanov@yadro.com \ + --push-option r=an.nikiforov@yadro.com \ + --push-option r=a.arifullin@yadro.com \ + --push-option r=ekaterina.lebedeva@yadro.com \ + --push-option r=a.savchuk@yadro.com \ + --push-option r=a.chuprov@yadro.com + # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: +lint-install: $(BIN) @rm -rf $(OUTPUT_LINT_DIR) - @mkdir $(OUTPUT_LINT_DIR) + @mkdir -p $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: @@ -203,7 +236,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir $(STATICCHECK_DIR) + @mkdir -p $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -216,7 +249,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir $(GOPLS_DIR) + @mkdir -p $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls @@ -270,10 +303,12 @@ env-up: all echo "Frostfs contracts not found"; exit 1; \ fi ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH} - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0 + ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \ + --storage-wallet ./dev/storage/wallet01.json \ + --storage-wallet ./dev/storage/wallet02.json \ + --storage-wallet ./dev/storage/wallet03.json \ + --storage-wallet ./dev/storage/wallet04.json + @if [ ! 
-f "$(LOCODE_DB_PATH)" ]; then \ make locode-download; \ fi diff --git a/VERSION b/VERSION index 01efe7f3a..9052dab96 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v0.42.0 +v0.44.0 diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index 87692d013..f194e97f5 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -16,9 +16,16 @@ const ( EndpointFlagDesc = "N3 RPC node endpoint" EndpointFlagShort = "r" + WalletPath = "wallet" + WalletPathShorthand = "w" + WalletPathUsage = "Path to the wallet" + AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" + AdminWalletPath = "wallet-admin" + AdminWalletUsage = "Path to the admin wallet" + LocalDumpFlag = "local-dump" ProtoConfigPath = "protocol" ContractsInitFlag = "contracts" diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go new file mode 100644 index 000000000..d67b70d2a --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/root.go @@ -0,0 +1,15 @@ +package maintenance + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" + "github.com/spf13/cobra" +) + +var RootCmd = &cobra.Command{ + Use: "maintenance", + Short: "Section for maintenance commands", +} + +func init() { + RootCmd.AddCommand(zombie.Cmd) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go new file mode 100644 index 000000000..1b66889aa --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go @@ -0,0 +1,70 @@ +package zombie + +import ( + "crypto/ecdsa" + "fmt" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/cli/flags" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { + keyDesc := viper.GetString(walletFlag) + if keyDesc == "" { + return &nodeconfig.Key(appCfg).PrivateKey + } + data, err := os.ReadFile(keyDesc) + commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) + + priv, err := keys.NewPrivateKeyFromBytes(data) + if err != nil { + w, err := wallet.NewWalletFromFile(keyDesc) + commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) + return fromWallet(cmd, w, viper.GetString(addressFlag)) + } + return &priv.PrivateKey +} + +func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { + var ( + addr util.Uint160 + err error + ) + + if addrStr == "" { + addr = w.GetChangeAddress() + } else { + addr, err = flags.ParseAddress(addrStr) + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) + } + + acc := w.GetAccount(addr) + if acc == nil { + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) + } + + pass, err := getPassword() + commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) + + 
commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) + + return &acc.PrivateKey().PrivateKey +} + +func getPassword() (string, error) { + // this check allows empty passwords + if viper.IsSet("password") { + return viper.GetString("password"), nil + } + + return input.ReadPassword("Enter password > ") +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go new file mode 100644 index 000000000..f73f33db9 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go @@ -0,0 +1,31 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func list(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + var containerID *cid.ID + if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" { + containerID = &cid.ID{} + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + } + + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { + if containerID != nil && a.Container() != *containerID { + return nil + } + cmd.Println(a.EncodeToString()) + return nil + })) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go new file mode 100644 index 000000000..cd3a64499 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go @@ -0,0 +1,46 @@ +package zombie + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "github.com/spf13/cobra" +) + +func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { + addresses := morphconfig.RPCEndpoint(appCfg) + if len(addresses) == 0 { + commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) + } + key := nodeconfig.Key(appCfg) + cli, err := client.New(cmd.Context(), + key, + client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), + client.WithEndpoints(addresses...), + client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), + ) + commonCmd.ExitOnErr(cmd, "create morph client: %w", err) + return cli +} + +func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { + hs, err := morph.NNSContractAddress(client.NNSContainerContractName) + commonCmd.ExitOnErr(cmd, "resolve container 
contract hash: %w", err) + cc, err := cntClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) + return cc +} + +func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { + hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) + commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) + cli, err := netmapClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) + return cli +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go new file mode 100644 index 000000000..27f83aec7 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go @@ -0,0 +1,154 @@ +package zombie + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "sync" + + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +type quarantine struct { + // mtx protects current field. + mtx sync.Mutex + current int + trees []*fstree.FSTree +} + +func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { + var paths []string + for _, sh := range engineInfo.Shards { + var storagePaths []string + for _, st := range sh.BlobStorInfo.SubStorages { + storagePaths = append(storagePaths, st.Path) + } + if len(storagePaths) == 0 { + continue + } + paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) + } + q, err := newQuarantine(paths) + commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) + return q +} + +func commonPath(paths []string) string { + if len(paths) == 0 { + return "" + } + if len(paths) == 1 { + return paths[0] + } + minLen := math.MaxInt + for _, p := range paths { + if len(p) < minLen { + minLen = len(p) + } + } + + var sb strings.Builder + for i := range minLen { + for _, path := range paths[1:] { + if paths[0][i] != path[i] { + return sb.String() + } + } + sb.WriteByte(paths[0][i]) + } + return sb.String() +} + +func newQuarantine(paths []string) (*quarantine, error) { + var q quarantine + for i := range paths { + f := fstree.New( + fstree.WithDepth(1), + fstree.WithDirNameLen(1), + fstree.WithPath(paths[i]), + fstree.WithPerm(os.ModePerm), + ) + if err := f.Open(mode.ComponentReadWrite); err != nil { + return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) + } + if err := f.Init(); err != nil { + return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) + } + q.trees = append(q.trees, f) + } + return &q, nil +} + +func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { + for i := range q.trees { + res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) + if err != nil { + continue + } + return res.Object, nil + } + return nil, &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Delete(ctx 
context.Context, a oid.Address) error { + for i := range q.trees { + _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) + if err != nil { + continue + } + return nil + } + return &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { + data, err := obj.Marshal() + if err != nil { + return err + } + + var prm common.PutPrm + prm.Address = objectcore.AddressOf(obj) + prm.Object = obj + prm.RawData = data + + q.mtx.Lock() + current := q.current + q.current = (q.current + 1) % len(q.trees) + q.mtx.Unlock() + + _, err = q.trees[current].Put(ctx, prm) + return err +} + +func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { + var prm common.IteratePrm + prm.Handler = func(elem common.IterationElement) error { + return f(elem.Address) + } + for i := range q.trees { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + _, err := q.trees[i].Iterate(ctx, prm) + if err != nil { + return err + } + } + return nil +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go new file mode 100644 index 000000000..0b8f2f172 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go @@ -0,0 +1,55 @@ +package zombie + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func remove(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + removeObject(cmd, q, addr) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { + if addr.Container() != containerID { + return nil + } + removeObject(cmd, q, addr) + return nil + })) + } +} + +func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { + err := q.Delete(cmd.Context(), addr) + if errors.Is(err, new(apistatus.ObjectNotFound)) { + return + } + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go new file mode 100644 index 000000000..f179c7c2d --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go @@ -0,0 +1,69 @@ +package zombie + +import ( + "crypto/sha256" + + 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func restore(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + restoreObject(cmd, storageEngine, q, addr, cnrCli) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { + if addr.Container() != containerID { + return nil + } + restoreObject(cmd, storageEngine, q, addr, cnrCli) + return nil + })) + } +} + +func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) { + obj, err := q.Get(cmd.Context(), addr) + commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err) + rawCID := make([]byte, sha256.Size) + + cid := addr.Container() + cid.Encode(rawCID) + cnr, err := cnrCli.Get(cmd.Context(), rawCID) + commonCmd.ExitOnErr(cmd, "get container: %w", err) + + putPrm := engine.PutPrm{ + Object: obj, + IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value), + } + commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm)) + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr)) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go new file mode 100644 index 000000000..c8fd9e5e5 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go @@ -0,0 +1,123 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + flagBatchSize = "batch-size" + flagBatchSizeUsage = "Objects iteration batch size" + cidFlag = "cid" + cidFlagUsage = "Container ID" + oidFlag = "oid" + oidFlagUsage = "Object ID" + walletFlag = "wallet" + walletFlagShorthand = "w" + walletFlagUsage = "Path to the wallet or binary key" + addressFlag = "address" + addressFlagUsage = "Address of wallet account" + moveFlag = "move" + moveFlagUsage = "Move objects from storage engine to quarantine" +) + +var ( + Cmd 
= &cobra.Command{ + Use: "zombie", + Short: "Zombie objects related commands", + } + scanCmd = &cobra.Command{ + Use: "scan", + Short: "Scan storage engine for zombie objects and move them to quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) + _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag)) + _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize)) + _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag)) + }, + Run: scan, + } + listCmd = &cobra.Command{ + Use: "list", + Short: "List zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + }, + Run: list, + } + restoreCmd = &cobra.Command{ + Use: "restore", + Short: "Restore zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: restore, + } + removeCmd = &cobra.Command{ + Use: "remove", + Short: "Remove zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: remove, + } +) + +func init() { + initScanCmd() + initListCmd() + initRestoreCmd() + initRemoveCmd() +} + +func initScanCmd() { + Cmd.AddCommand(scanCmd) + + scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) + scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage) + scanCmd.Flags().String(addressFlag, "", addressFlagUsage) + scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage) +} + +func initListCmd() { + Cmd.AddCommand(listCmd) + + listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + listCmd.Flags().String(cidFlag, "", cidFlagUsage) +} + +func initRestoreCmd() { + Cmd.AddCommand(restoreCmd) + + restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + restoreCmd.Flags().String(cidFlag, "", cidFlagUsage) + restoreCmd.Flags().String(oidFlag, "", oidFlagUsage) +} + +func initRemoveCmd() { + Cmd.AddCommand(removeCmd) + + removeCmd.Flags().StringP(commonflags.ConfigFlag, 
commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + removeCmd.Flags().String(cidFlag, "", cidFlagUsage) + removeCmd.Flags().String(oidFlag, "", oidFlagUsage) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go new file mode 100644 index 000000000..268ec4911 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go @@ -0,0 +1,281 @@ +package zombie + +import ( + "context" + "crypto/ecdsa" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +func scan(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + batchSize, _ := cmd.Flags().GetUint32(flagBatchSize) + if batchSize == 0 { + commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value")) + } + move, _ := cmd.Flags().GetBool(moveFlag) + + storageEngine := newEngine(cmd, appCfg) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + nmCli := createNetmapClient(cmd, morphClient) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + pk := getPrivateKey(cmd, appCfg) + + epoch, err := nmCli.Epoch(cmd.Context()) + commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err) + + nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch) + commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err) + + cmd.Printf("Epoch: %d\n", nm.Epoch()) + cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes())) + + ps := &processStatus{ + statusCount: make(map[status]uint64), + } + + stopCh := make(chan struct{}) + start := time.Now() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tick := time.NewTicker(time.Second) + defer tick.Stop() + for { + select { + case <-cmd.Context().Done(): + return + case <-stopCh: + return + case <-tick.C: + fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start)) + } + } + }() + go func() { + defer wg.Done() + err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move) + close(stopCh) + }() + wg.Wait() + 
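`scan` above pairs a worker goroutine with a ticker goroutine that prints progress once per second until the worker closes `stopCh` or the context is cancelled. A minimal sketch of that pattern in isolation, not part of the patch: an `atomic.Uint64` stands in for `processStatus`, and the busy loop stands in for `scanStorageEngine`.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var processed atomic.Uint64
	stopCh := make(chan struct{})
	start := time.Now()

	var wg sync.WaitGroup
	wg.Add(2)
	go func() { // progress reporter, mirrors the ticker goroutine in scan
		defer wg.Done()
		tick := time.NewTicker(time.Second)
		defer tick.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-stopCh:
				return
			case <-tick.C:
				fmt.Printf("Objects processed: %d; Time elapsed: %s\n",
					processed.Load(), time.Since(start))
			}
		}
	}()
	go func() { // worker; closing stopCh stops the reporter
		defer wg.Done()
		defer close(stopCh)
		for range 3_000_000 {
			processed.Add(1)
		}
	}()
	wg.Wait()
	fmt.Printf("done: %d objects\n", processed.Load())
}
```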
commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err) + + cmd.Println() + cmd.Println("Status description:") + cmd.Println("undefined -- nothing is clear") + cmd.Println("found -- object is found in cluster") + cmd.Println("quarantine -- object is not found in cluster") + cmd.Println() + for status, count := range ps.statusCount { + cmd.Printf("Status: %s, Count: %d\n", status, count) + } +} + +type status string + +const ( + statusUndefined status = "undefined" + statusFound status = "found" + statusQuarantine status = "quarantine" +) + +func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) { + rawCID := make([]byte, sha256.Size) + cid := obj.Address.Container() + cid.Encode(rawCID) + + cnr, err := cnrCli.Get(ctx, rawCID) + if err != nil { + var errContainerNotFound *apistatus.ContainerNotFound + if errors.As(err, &errContainerNotFound) { + // Policer will deal with this object. + return statusFound, nil + } + return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err) + } + nm, err := nmCli.NetMap(ctx) + if err != nil { + return statusUndefined, fmt.Errorf("read netmap from morph: %w", err) + } + + nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID) + if err != nil { + // Not enough nodes, check all netmap nodes. + nodes = append([][]netmap.NodeInfo{}, nm.Nodes()) + } + + objID := obj.Address.Object() + cnrID := obj.Address.Container() + local := true + raw := false + if obj.ECInfo != nil { + objID = obj.ECInfo.ParentID + local = false + raw = true + } + prm := clientSDK.PrmObjectHead{ + ObjectID: &objID, + ContainerID: &cnrID, + Local: local, + Raw: raw, + } + + var ni clientCore.NodeInfo + for i := range nodes { + for j := range nodes[i] { + if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil { + return statusUndefined, fmt.Errorf("parse node info: %w", err) + } + c, err := cc.Get(ni) + if err != nil { + continue + } + res, err := c.ObjectHead(ctx, prm) + if err != nil { + var errECInfo *objectSDK.ECInfoError + if raw && errors.As(err, &errECInfo) { + return statusFound, nil + } + continue + } + if err := apistatus.ErrFromStatus(res.Status()); err != nil { + continue + } + return statusFound, nil + } + } + + if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 { + return statusFound, nil + } + return statusQuarantine, nil +} + +func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus, + appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool, +) error { + cc := cache.NewSDKClientCache(cache.ClientCacheOpts{ + DialTimeout: apiclientconfig.DialTimeout(appCfg), + StreamTimeout: apiclientconfig.StreamTimeout(appCfg), + ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), + Key: pk, + AllowExternal: apiclientconfig.AllowExternal(appCfg), + }) + ctx := cmd.Context() + + var cursor *engine.Cursor + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var prm engine.ListWithCursorPrm + prm.WithCursor(cursor) + prm.WithCount(batchSize) + + res, err := storageEngine.ListWithCursor(ctx, prm) + if err != nil { + if errors.Is(err, engine.ErrEndOfListing) { + return nil + } + return fmt.Errorf("list with cursor: %w", err) + } + + cursor = res.Cursor() + addrList := res.AddressList() + eg, 
egCtx := errgroup.WithContext(ctx) + eg.SetLimit(int(batchSize)) + + for i := range addrList { + addr := addrList[i] + eg.Go(func() error { + result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr) + if err != nil { + return fmt.Errorf("check object %s status: %w", addr.Address, err) + } + ps.add(result) + + if !move && result == statusQuarantine { + cmd.Println(addr) + return nil + } + + if result == statusQuarantine { + return moveToQuarantine(egCtx, storageEngine, q, addr.Address) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return fmt.Errorf("process objects batch: %w", err) + } + } +} + +func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error { + var getPrm engine.GetPrm + getPrm.WithAddress(addr) + res, err := storageEngine.Get(ctx, getPrm) + if err != nil { + return fmt.Errorf("get object %s from storage engine: %w", addr, err) + } + + if err := q.Put(ctx, res.Object()); err != nil { + return fmt.Errorf("put object %s to quarantine: %w", addr, err) + } + + var delPrm engine.DeletePrm + delPrm.WithForceRemoval() + delPrm.WithAddress(addr) + + if err = storageEngine.Delete(ctx, delPrm); err != nil { + return fmt.Errorf("delete object %s from storage engine: %w", addr, err) + } + return nil +} + +type processStatus struct { + guard sync.RWMutex + statusCount map[status]uint64 + count uint64 +} + +func (s *processStatus) add(st status) { + s.guard.Lock() + defer s.guard.Unlock() + s.statusCount[st]++ + s.count++ +} + +func (s *processStatus) total() uint64 { + s.guard.RLock() + defer s.guard.RUnlock() + return s.count +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go new file mode 100644 index 000000000..5be34d502 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -0,0 +1,201 @@ +package zombie + +import ( + "context" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" + shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" + fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "github.com/panjf2000/ants/v2" + "github.com/spf13/cobra" + 
"go.etcd.io/bbolt" + "go.uber.org/zap" +) + +func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine { + ngOpts := storageEngineOptions(c) + shardOpts := shardOptions(cmd, c) + e := engine.New(ngOpts...) + for _, opts := range shardOpts { + _, err := e.AddShard(cmd.Context(), opts...) + commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + } + commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context())) + commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context())) + return e +} + +func storageEngineOptions(c *config.Config) []engine.Option { + return []engine.Option{ + engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), + engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)), + } +} + +func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option { + var result [][]shard.Option + err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error { + result = append(result, getShardOpts(cmd, c, sh)) + return nil + }) + commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + return result +} + +func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option { + wc, wcEnabled := getWriteCacheOpts(sh) + return []shard.Option{ + shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + shard.WithRefillMetabase(sh.RefillMetabase()), + shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()), + shard.WithMode(sh.Mode()), + shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...), + shard.WithMetaBaseOptions(getMetabaseOpts(sh)...), + shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...), + shard.WithWriteCache(wcEnabled), + shard.WithWriteCacheOptions(wc), + shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()), + shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()), + shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()), + shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()), + shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { + pool, err := ants.NewPool(sz) + commonCmd.ExitOnErr(cmd, "init GC pool: %w", err) + return pool + }), + shard.WithLimiter(qos.NewNoopLimiter()), + } +} + +func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) { + if wc := sh.WriteCache(); wc != nil && wc.Enabled() { + var result []writecache.Option + result = append(result, + writecache.WithPath(wc.Path()), + writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()), + writecache.WithMaxObjectSize(wc.MaxObjectSize()), + writecache.WithFlushWorkersCount(wc.WorkerCount()), + writecache.WithMaxCacheSize(wc.SizeLimit()), + writecache.WithMaxCacheCount(wc.CountLimit()), + writecache.WithNoSync(wc.NoSync()), + writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + writecache.WithQoSLimiter(qos.NewNoopLimiter()), + ) + return result, true + } + return nil, false +} + +func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option { + var piloramaOpts []pilorama.Option + if config.BoolSafe(c.Sub("tree"), "enabled") { + pr := sh.Pilorama() + piloramaOpts = append(piloramaOpts, + pilorama.WithPath(pr.Path()), + pilorama.WithPerm(pr.Perm()), + pilorama.WithNoSync(pr.NoSync()), + pilorama.WithMaxBatchSize(pr.MaxBatchSize()), + pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()), + ) + } + return piloramaOpts +} + +func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { + return 
[]meta.Option{ + meta.WithPath(sh.Metabase().Path()), + meta.WithPermissions(sh.Metabase().BoltDB().Perm()), + meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()), + meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()), + meta.WithBoltDBOptions(&bbolt.Options{ + Timeout: 100 * time.Millisecond, + }), + meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + meta.WithEpochState(&epochState{}), + } +} + +func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { + result := []blobstor.Option{ + blobstor.WithCompression(sh.Compression()), + blobstor.WithStorages(getSubStorages(ctx, sh)), + blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + return result +} + +func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage { + var ss []blobstor.SubStorage + for _, storage := range sh.BlobStor().Storages() { + switch storage.Type() { + case blobovniczatree.Type: + sub := blobovniczaconfig.From((*config.Config)(storage)) + blobTreeOpts := []blobovniczatree.Option{ + blobovniczatree.WithRootPath(storage.Path()), + blobovniczatree.WithPermissions(storage.Perm()), + blobovniczatree.WithBlobovniczaSize(sub.Size()), + blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()), + blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()), + blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()), + blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()), + blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), + blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), + blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), + blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), + Policy: func(_ *objectSDK.Object, data []byte) bool { + return uint64(len(data)) < sh.SmallSizeLimit() + }, + }) + case fstree.Type: + sub := fstreeconfig.From((*config.Config)(storage)) + fstreeOpts := []fstree.Option{ + fstree.WithPath(storage.Path()), + fstree.WithPerm(storage.Perm()), + fstree.WithDepth(sub.Depth()), + fstree.WithNoSync(sub.NoSync()), + fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: fstree.New(fstreeOpts...), + Policy: func(_ *objectSDK.Object, _ []byte) bool { + return true + }, + }) + default: + // should never happen, that has already + // been handled: when the config was read + } + } + return ss +} + +type epochState struct{} + +func (epochState) CurrentEpoch() uint64 { + return 0 +} diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go index 00b30c9b2..c0c290c5e 100644 --- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go +++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go @@ -28,6 +28,7 @@ const ( var ( errNoPathsFound = errors.New("no metabase paths found") errNoMorphEndpointsFound = errors.New("no morph endpoints found") + errUpgradeFailed = errors.New("upgrade failed") ) var UpgradeCmd = &cobra.Command{ @@ -91,14 +92,19 @@ func upgrade(cmd *cobra.Command, _ []string) error { if err := eg.Wait(); err != nil { return err } + allSuccess := true for mb, ok := range result { if ok { cmd.Println(mb, ": success") } else { cmd.Println(mb, ": failed") + 
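`getSubStorages` above wires two sub-storages with ordered policies: payloads smaller than `sh.SmallSizeLimit()` match the blobovnicza-tree policy, and fstree's always-true policy is the catch-all. A minimal sketch of that first-match routing, not part of the patch; the types and the 1 MiB limit are illustrative stand-ins for the real `blobstor.SubStorage` machinery.

```go
package main

import "fmt"

type subStorage struct {
	name   string
	policy func(size uint64) bool
}

// route returns the first sub-storage whose policy accepts the payload,
// matching how blobstor picks a backend per object.
func route(stores []subStorage, size uint64) string {
	for _, s := range stores {
		if s.policy(size) {
			return s.name
		}
	}
	return "none"
}

func main() {
	const smallSizeLimit = 1 << 20 // stand-in for sh.SmallSizeLimit()
	stores := []subStorage{
		{name: "blobovnicza", policy: func(size uint64) bool { return size < smallSizeLimit }},
		{name: "fstree", policy: func(uint64) bool { return true }}, // catch-all
	}
	fmt.Println(route(stores, 4<<10)) // blobovnicza
	fmt.Println(route(stores, 8<<20)) // fstree
}
```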
allSuccess = false } } - return nil + if allSuccess { + return nil + } + return errUpgradeFailed } func getMetabasePaths(appCfg *config.Config) ([]string, error) { @@ -135,7 +141,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er if err != nil { return nil, fmt.Errorf("resolve container contract hash: %w", err) } - cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary()) + cc, err := morphcontainer.NewFromMorph(cli, sh, 0) if err != nil { return nil, fmt.Errorf("create morph container client: %w", err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go index 8fcd4a441..1960faab4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go @@ -8,7 +8,7 @@ import ( commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -200,7 +200,7 @@ func listRuleChains(cmd *cobra.Command, _ []string) { func setAdmin(cmd *cobra.Command, _ []string) { s, _ := cmd.Flags().GetString(addrAdminFlag) - addr, err := util.Uint160DecodeStringLE(s) + addr, err := address.StringToUint160(s) commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err) pci, ac := newPolicyContractInterface(cmd) h, vub, err := pci.SetAdmin(addr) @@ -214,7 +214,7 @@ func getAdmin(cmd *cobra.Command, _ []string) { pci, _ := newPolicyContractReaderInterface(cmd) addr, err := pci.GetAdmin() commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err) - cmd.Println(addr.StringLE()) + cmd.Println(address.Uint160ToString(addr)) } func listTargets(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 6780e6dd3..3c332c3f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -3,6 +3,8 @@ package ape import ( "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -53,16 +55,15 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { } func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) inv := invoker.New(c, nil) - var ch util.Uint160 r := management.NewReader(inv) nnsCs, err := helper.GetContractByID(r, 1) commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) + ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err) invokerAdapter := &invokerAdapter{ @@ 
-74,10 +75,11 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag } func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName) + walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) + ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName}) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) var ch util.Uint160 diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index 5519705d4..23dba14f4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -51,7 +52,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error { nmHash util.Uint160 ) - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return err } @@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) alphaRes, err := c.InvokeScript(w.Bytes(), nil) if err != nil { @@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan for i := range accounts { emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) res, err := c.Run(w.Bytes()) if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) { diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index 3a7f84acb..c17fb62ff 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -26,7 +26,7 @@ import ( const forceConfigSet = "force" func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -63,16 +63,16 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig, netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig: nbuf := make([]byte, 8) - copy(nbuf[:], v) + copy(nbuf, v) n := binary.LittleEndian.Uint64(nbuf) - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: if len(v) == 0 || len(v) > 1 { return 
helper.InvalidConfigValueErr(k) } - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1)) default: - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v)))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v))) } } diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index 6f08d1655..79685f111 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/io" @@ -76,7 +77,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error { return fmt.Errorf("invalid filename: %w", err) } - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -157,7 +158,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo } func listContainers(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -235,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd putContainer(bw, ch, cnt) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go index 5adb480da..543b5fcb3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/cli/cmdargs" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error { } } - if writer.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } + assert.NoError(writer.Err, "can't create deployment script") if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { return err @@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) } - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } else if bw.Len() != start { + assert.NoError(bw.Err, "can't create deployment script") + if bw.Len() != start { writer.WriteBytes(bw.Bytes()) emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, 
opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index be2134b77..fde58fd2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -11,6 +11,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -36,7 +37,7 @@ type contractDumpInfo struct { } func dumpContractHashes(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -219,8 +220,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) { if info.version == "" { info.version = "unknown" } - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n", - info.name, info.version, info.hash.StringLE()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n", + info.name, info.version, info.hash.StringLE())) } _ = tw.Flush() @@ -236,21 +237,17 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } else { sub.Reset() emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - if sub.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(sub.Err, "can't create version script") script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.BinWriter.WriteBytes(script) + bw.WriteBytes(script) emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) emit.Opcodes(bw.BinWriter, opcode.PUSH0) } } emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(bw.Err, "can't create version script") res, err := c.InvokeScript(bw.Bytes(), nil) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 74da52a8f..7f777db98 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,6 +1,7 @@ package frostfsid import ( + "encoding/hex" "errors" "fmt" "math/big" @@ -34,11 +35,16 @@ const ( subjectNameFlag = "subject-name" subjectKeyFlag = "subject-key" subjectAddressFlag = "subject-address" - includeNamesFlag = "include-names" + extendedFlag = "extended" groupNameFlag = "group-name" groupIDFlag = "group-id" rootNamespacePlaceholder = "" + + keyFlag = "key" + keyDescFlag = "Key for storing a value in the subject's KV storage" + valueFlag = "value" + valueDescFlag = "Value to be stored in the subject's KV storage" ) var ( @@ -152,6 +158,23 @@ var ( }, Run: frostfsidListGroupSubjects, } + + frostfsidSetKVCmd = &cobra.Command{ + Use: "set-kv", + Short: "Store a key-value pair in the subject's KV storage", + PreRun: func(cmd 
*cobra.Command, _ []string) {
+			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+		},
+		Run: frostfsidSetKV,
+	}
+	frostfsidDeleteKVCmd = &cobra.Command{
+		Use:   "delete-kv",
+		Short: "Delete a value from the subject's KV storage",
+		PreRun: func(cmd *cobra.Command, _ []string) {
+			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+		},
+		Run: frostfsidDeleteKV,
+	}
 )
 
 func initFrostfsIDCreateNamespaceCmd() {
@@ -187,7 +210,7 @@ func initFrostfsIDListSubjectsCmd() {
 	Cmd.AddCommand(frostfsidListSubjectsCmd)
 	frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
-	frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
+	frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether to include subject info (requires additional requests)")
 }
 
 func initFrostfsIDCreateGroupCmd() {
@@ -234,7 +257,22 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
 	frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
-	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
+	frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether to include subject info (requires additional requests)")
+}
+
+func initFrostfsIDSetKVCmd() {
+	Cmd.AddCommand(frostfsidSetKVCmd)
+	frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+	frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
+	frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
+	frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
+}
+
+func initFrostfsIDDeleteKVCmd() {
+	Cmd.AddCommand(frostfsidDeleteKVCmd)
+	frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+	frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
+	frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
 }
 
 func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
@@ -254,7 +292,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
 	reader := frostfsidrpclient.NewReader(inv, hash)
 	sessionID, it, err := reader.ListNamespaces()
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
+	items, err := readIterator(inv, &it, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 	namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -299,34 +337,32 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
 }
 
 func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
-	includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
+	extended, _ := cmd.Flags().GetBool(extendedFlag)
 	ns := getFrostfsIDNamespace(cmd)
 	inv, _, hash := initInvoker(cmd)
 	reader := frostfsidrpclient.NewReader(inv, hash)
 	sessionID, it, err := reader.ListNamespaceSubjects(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	subAddresses, err :=
frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID)) + subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID)) commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) for _, addr := range subAddresses { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(addr)) continue } - sessionID, it, err := reader.ListSubjects() + items, err := reader.GetSubject(addr) commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) - commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) - subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name) + printSubjectInfo(cmd, addr, subj) + cmd.Println() } } @@ -366,7 +402,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroups(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) groups, err := frostfsidclient.ParseGroups(items) commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err) @@ -404,10 +440,49 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) } +func frostfsidSetKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + value, _ := cmd.Flags().GetString(valueFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "set KV: %w", err) +} + +func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "delete KV: %w", err) +} + func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) inv, cs, hash := initInvoker(cmd) _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) @@ -416,7 +491,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID)) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) subjects, err := 
frostfsidclient.UnwrapArrayOfUint160(items, err) @@ -425,7 +500,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) }) for _, subjAddr := range subjects { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(subjAddr)) continue } @@ -434,7 +509,8 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name) + printSubjectInfo(cmd, subjAddr, subj) + cmd.Println() } } @@ -489,32 +565,28 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) { } f.bw.Reset() - if len(f.wCtx.SentTxs) == 0 { - return nil, errors.New("no transactions to wait") - } - f.wCtx.Command.Println("Waiting for transactions to persist...") return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) } -func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) { +func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) { var shouldStop bool res := make([]stackitem.Item, 0) for !shouldStop { - items, err := inv.TraverseIterator(sessionID, iter, batchSize) + items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize) if err != nil { return nil, err } res = append(res, items...) - shouldStop = len(items) < batchSize + shouldStop = len(items) < iteratorBatchSize } return res, nil } func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) inv := invoker.New(c, nil) @@ -528,3 +600,30 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui return inv, cs, nmHash } + +func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) { + cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) + pk := "" + if subj.PrimaryKey != nil { + pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) + } + cmd.Printf("Primary key: %s\n", pk) + cmd.Printf("Name: %s\n", subj.Name) + cmd.Printf("Namespace: %s\n", subj.Namespace) + if len(subj.AdditionalKeys) > 0 { + cmd.Printf("Additional keys:\n") + for _, key := range subj.AdditionalKeys { + k := "" + if key != nil { + k = hex.EncodeToString(key.Bytes()) + } + cmd.Printf("- %s\n", k) + } + } + if len(subj.KV) > 0 { + cmd.Printf("KV:\n") + for k, v := range subj.KV { + cmd.Printf("- %s: %s\n", k, v) + } + } +} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go index cce859d2f..1d0bc8441 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go @@ -1,59 +1,12 @@ package frostfsid import ( - "encoding/hex" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/spf13/viper" "github.com/stretchr/testify/require" ) -func 
TestFrostfsIDConfig(t *testing.T) { - pks := make([]*keys.PrivateKey, 4) - for i := range pks { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - pks[i] = pk - } - - fmts := []string{ - pks[0].GetScriptHash().StringLE(), - address.Uint160ToString(pks[1].GetScriptHash()), - hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), - hex.EncodeToString(pks[3].PublicKey().Bytes()), - } - - for i := range fmts { - v := viper.New() - v.Set("frostfsid.admin", fmts[i]) - - actual, found, err := helper.GetFrostfsIDAdmin(v) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, pks[i].GetScriptHash(), actual) - } - - t.Run("bad key", func(t *testing.T) { - v := viper.New() - v.Set("frostfsid.admin", "abc") - - _, found, err := helper.GetFrostfsIDAdmin(v) - require.Error(t, err) - require.True(t, found) - }) - t.Run("missing key", func(t *testing.T) { - v := viper.New() - - _, found, err := helper.GetFrostfsIDAdmin(v) - require.NoError(t, err) - require.False(t, found) - }) -} - func TestNamespaceRegexp(t *testing.T) { for _, tc := range []struct { name string diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 6ffcaa487..8aad5c5c1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -12,6 +12,8 @@ func init() { initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() + initFrostfsIDSetKVCmd() + initFrostfsIDDeleteKVCmd() initFrostfsIDAddSubjectKeyCmd() initFrostfsIDRemoveSubjectKeyCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go index 7af776797..78f8617f1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" "github.com/nspcc-dev/neo-go/pkg/smartcontract" @@ -141,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key } func generateStorageCreds(cmd *cobra.Command, _ []string) error { - return refillGas(cmd, storageGasConfigFlag, true) -} - -func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) { - // storage wallet path is not part of the config - storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) - // wallet address is not part of the config - walletAddress, _ := cmd.Flags().GetString(walletAddressFlag) - - var gasReceiver util.Uint160 - - if len(walletAddress) != 0 { - gasReceiver, err = address.StringToUint160(walletAddress) - if err != nil { - return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) - } - } else { - if storageWalletPath == "" { - return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) - } - - var w *wallet.Wallet - - if createWallet { - w, err = wallet.NewWallet(storageWalletPath) - } else { - w, err = wallet.NewWalletFromFile(storageWalletPath) - } - - if err != nil { - return fmt.Errorf("can't create wallet: %w", err) - } - - if createWallet { - var password string 
- - label, _ := cmd.Flags().GetString(storageWalletLabelFlag) - password, err := config.GetStoragePassword(viper.GetViper(), label) - if err != nil { - return fmt.Errorf("can't fetch password: %w", err) - } - - if label == "" { - label = constants.SingleAccountName - } - - if err := w.CreateAccount(label, password); err != nil { - return fmt.Errorf("can't create account: %w", err) - } - } - - gasReceiver = w.Accounts[0].Contract.ScriptHash() + walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) + w, err := wallet.NewWallet(walletPath) + if err != nil { + return fmt.Errorf("create wallet: %w", err) } + label, _ := cmd.Flags().GetString(storageWalletLabelFlag) + password, err := config.GetStoragePassword(viper.GetViper(), label) + if err != nil { + return fmt.Errorf("can't fetch password: %w", err) + } + + if label == "" { + label = constants.SingleAccountName + } + + if err := w.CreateAccount(label, password); err != nil { + return fmt.Errorf("can't create account: %w", err) + } + return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash()) +} + +func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) { gasStr := viper.GetString(gasFlag) gasAmount, err := helper.ParseGASAmount(gasStr) @@ -208,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error } bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, - wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) + for _, gasReceiver := range gasReceivers { + emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, + wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) + emit.Opcodes(bw.BinWriter, opcode.ASSERT) + } if bw.Err != nil { return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go index 3633d9a8e..73c986713 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go @@ -1,7 +1,12 @@ package generate import ( + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -33,7 +38,27 @@ var ( _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) }, RunE: func(cmd *cobra.Command, _ []string) error { - return refillGas(cmd, commonflags.RefillGasAmountFlag, false) + storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag) + walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag) + + var gasReceivers []util.Uint160 + for _, walletAddress := range walletAddresses { + addr, err := address.StringToUint160(walletAddress) + if err != nil { + return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) + } + + gasReceivers = append(gasReceivers, addr) + } + for _, storageWalletPath := range storageWalletPaths { + w, err := wallet.NewWalletFromFile(storageWalletPath) + if err != nil { + return fmt.Errorf("can't create wallet: %w", err) + } + + gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash()) + } + return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...) 
}, } GenerateAlphabetCmd = &cobra.Command{ @@ -50,10 +75,10 @@ var ( func initRefillGasCmd() { RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") - RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet") + RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet") + RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet") RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer") - RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag) + RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag) } func initGenerateStorageCmd() { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index eb0444408..6499ace5f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -3,9 +3,6 @@ package helper import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -16,7 +13,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -28,32 +24,86 @@ type LocalActor struct { rpcInvoker invoker.RPCInvoke } +type AlphabetWallets struct { + Label string + Path string +} + +func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) { + w, err := GetAlphabetWallets(v, a.Path) + if err != nil { + return nil, err + } + + var accounts []*wallet.Account + for _, wall := range w { + acc, err := GetWalletAccount(wall, a.Label) + if err != nil { + return nil, err + } + accounts = append(accounts, acc) + } + return accounts, nil +} + +type RegularWallets struct{ Path string } + +func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) { + w, err := getRegularWallet(r.Path) + if err != nil { + return nil, err + } + + return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil +} + // NewLocalActor create LocalActor with accounts form provided wallets. // In case of empty wallets provided created actor with dummy account only for read operation. // // If wallets are provided, the contract client will use accounts with accName name from these wallets. // To determine which account name should be used in a contract client, refer to how the contract // verifies the transaction signature. 
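// A minimal call-site sketch for the reworked NewLocalActor below, mirroring
// the updated ape_util.go hunk earlier in this patch (error handling elided;
// walletDir resolution is assumed to happen in the calling command):
//
//	c, _ := helper.NewRemoteClient(viper.GetViper())
//	walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
//	act, err := helper.NewLocalActor(c, &helper.AlphabetWallets{
//		Path:  walletDir,
//		Label: constants.ConsensusAccountName,
//	})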
-func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) { - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) +func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { var act *actor.Actor var accounts []*wallet.Account + var signers []actor.SignerAccount - wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir) - commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) + if alphabet != nil { + account, err := alphabet.GetAccount(viper.GetViper()) + if err != nil { + return nil, err + } - for _, w := range wallets { - acc, err := GetWalletAccount(w, accName) - commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err) - accounts = append(accounts, acc) + accounts = append(accounts, account...) + signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) } - act, err = actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: accounts[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: accounts[0], - }}) + + for _, w := range regularWallets { + if w == nil { + continue + } + account, err := w.GetAccount() + if err != nil { + return nil, err + } + + accounts = append(accounts, account...) + signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) + } + + act, err := actor.New(c, signers) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go index eea3b040e..64d1c6393 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go @@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker) } if method != constants.UpdateMethodName || err == nil && !found { - h, found, err = GetFrostfsIDAdmin(viper.GetViper()) + h, found, err = getFrostfsIDAdmin(viper.GetViper()) } if err != nil { return nil, err diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go index f29042b82..fce2dfb74 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go @@ -11,7 +11,7 @@ import ( const frostfsIDAdminConfigKey = "frostfsid.admin" -func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { +func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { admin := v.GetString(frostfsIDAdminConfigKey) if admin == "" { return util.Uint160{}, false, nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go new file mode 100644 index 000000000..38991e962 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go @@ -0,0 +1,53 @@ +package helper + +import ( + "encoding/hex" + "testing" + + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" +) + +func TestFrostfsIDConfig(t 
*testing.T) { + pks := make([]*keys.PrivateKey, 4) + for i := range pks { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + pks[i] = pk + } + + fmts := []string{ + pks[0].GetScriptHash().StringLE(), + address.Uint160ToString(pks[1].GetScriptHash()), + hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), + hex.EncodeToString(pks[3].PublicKey().Bytes()), + } + + for i := range fmts { + v := viper.New() + v.Set("frostfsid.admin", fmts[i]) + + actual, found, err := getFrostfsIDAdmin(v) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, pks[i].GetScriptHash(), actual) + } + + t.Run("bad key", func(t *testing.T) { + v := viper.New() + v.Set("frostfsid.admin", "abc") + + _, found, err := getFrostfsIDAdmin(v) + require.Error(t, err) + require.True(t, found) + }) + t.Run("missing key", func(t *testing.T) { + v := viper.New() + + _, found, err := getFrostfsIDAdmin(v) + require.NoError(t, err) + require.False(t, found) + }) +} diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 961ceba53..50b5c1ec7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -13,9 +14,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" @@ -187,19 +186,9 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (* } func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - switch c.(type) { - case *rpcclient.Client: - inv := invoker.New(c, nil) - reader := nns2.NewReader(inv, nnsHash) - return reader.IsAvailable(name) - default: - b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) - if err != nil { - return false, fmt.Errorf("`isAvailable`: invalid response: %w", err) - } - - return b, nil - } + inv := invoker.New(c, nil) + reader := nns2.NewReader(inv, nnsHash) + return reader.IsAvailable(name) } func CheckNotaryEnabled(c Client) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index b5b6adf05..da5ffedae 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -13,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -21,6 +22,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -28,7 +30,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -134,12 +135,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex return nil, err } - accounts, err := createWalletAccounts(wallets) + accounts, err := getSingleAccounts(wallets) if err != nil { return nil, err } - cliCtx, err := DefaultClientContext(c, committeeAcc) + cliCtx, err := defaultClientContext(c, committeeAcc) if err != nil { return nil, fmt.Errorf("client context: %w", err) } @@ -191,7 +192,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) } c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String()) } else { - c, err = GetN3Client(v) + c, err = NewRemoteClient(v) } if err != nil { return nil, fmt.Errorf("can't create N3 client: %w", err) @@ -211,7 +212,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) { return ctrPath, nil } -func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { +func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { accounts := make([]*wallet.Account, len(wallets)) for i, w := range wallets { acc, err := GetWalletAccount(w, constants.SingleAccountName) @@ -375,9 +376,7 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen } act, err = actor.New(c.Client, signers) } else { - if withConsensus { - panic("BUG: should never happen") - } + assert.False(withConsensus, "BUG: should never happen") act, err = c.CommitteeAct, nil } if err != nil { @@ -411,11 +410,9 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { version, err := c.Client.GetVersion() - if err != nil { - // error appears only if client - // has not been initialized - panic(err) - } + // error appears only if client + // has not been initialized + assert.NoError(err) network := version.Protocol.Network // Use parameter context to avoid dealing with signature order. 
@@ -447,12 +444,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin for i := range tx.Signers { if tx.Signers[i].Account == h { + assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") if i < len(tx.Scripts) { tx.Scripts[i] = *w - } else if i == len(tx.Scripts) { + } + if i == len(tx.Scripts) { tx.Scripts = append(tx.Scripts, *w) - } else { - panic("BUG: invalid signing order") } return nil } @@ -510,9 +507,7 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) emit.Opcodes(bw.BinWriter, opcode.ASSERT) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) return bw.Bytes(), false, nil } @@ -524,12 +519,8 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U } func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone) - if err != nil { - return false, err - } - - return res.State == vmstate.Halt.String(), nil + avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) + return !avail, err } func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index ed028fb7c..46611c177 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" "github.com/nspcc-dev/neo-go/pkg/core" @@ -58,17 +59,59 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet return nil, err } - m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount)) - accounts := make([]*wallet.Account, len(wallets)) - for i := range accounts { - accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName) - if err != nil { - return nil, err + go bc.Run() + + accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets) + if err != nil { + return nil, err + } + + if cmd.Name() != "init" { + if err := restoreDump(bc, dumpPath); err != nil { + return nil, fmt.Errorf("restore dump: %w", err) } } + return &LocalClient{ + bc: bc, + dumpPath: dumpPath, + accounts: accounts, + }, nil +} + +func restoreDump(bc *core.Blockchain, dumpPath string) error { + f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) + if err != nil { + return fmt.Errorf("can't open local dump: %w", err) + } + defer f.Close() + + r := io.NewBinReaderFromIO(f) + + var skip uint32 + if bc.BlockHeight() != 0 { + skip = bc.BlockHeight() + 1 + } + + count := r.ReadU32LE() - skip + if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { + return err + } + return nil +} + +func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) { + accounts := make([]*wallet.Account, len(wallets)) + for i := range accounts { + acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName) + if err != nil { + return nil, err + } + accounts[i] = acc 
+ } + indexMap := make(map[string]int) - for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee { + for i, pub := range cfg.StandbyCommittee { indexMap[pub] = i } @@ -77,37 +120,12 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet pj := accounts[j].PrivateKey().PublicKey().Bytes() return indexMap[string(pi)] < indexMap[string(pj)] }) - sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool { + sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool { return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1 }) - go bc.Run() - - if cmd.Name() != "init" { - f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) - if err != nil { - return nil, fmt.Errorf("can't open local dump: %w", err) - } - defer f.Close() - - r := io.NewBinReaderFromIO(f) - - var skip uint32 - if bc.BlockHeight() != 0 { - skip = bc.BlockHeight() + 1 - } - - count := r.ReadU32LE() - skip - if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { - return nil, fmt.Errorf("can't restore local dump: %w", err) - } - } - - return &LocalClient{ - bc: bc, - dumpPath: dumpPath, - accounts: accounts[:m], - }, nil + m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount)) + return accounts[:m], nil } func (l *LocalClient) GetBlockCount() (uint32, error) { @@ -128,11 +146,6 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul return &a, nil } -func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) { - // not used by `morph init` command - panic("unexpected call") -} - // InvokeFunction is implemented via `InvokeScript`. func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) { var err error @@ -296,13 +309,7 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer) } func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) { - // We need to test that transaction was formed correctly to catch as many errors as we can. - bs := tx.Bytes() - _, err := transaction.NewTransactionFromBytes(bs) - if err != nil { - return tx.Hash(), fmt.Errorf("invalid transaction: %w", err) - } - + tx = tx.Copy() l.transactions = append(l.transactions, tx) return tx.Hash(), nil } @@ -310,9 +317,7 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint func (l *LocalClient) putTransactions() error { // 1. Prepare new block. lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - if err != nil { - panic(err) - } + assert.NoError(err) defer func() { l.transactions = l.transactions[:0] }() b := &block.Block{ @@ -353,9 +358,7 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s w := io.NewBufBinWriter() emit.Array(w.BinWriter, parameters...) 
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - if w.Err != nil { - panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) - } + assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) return c.InvokeScript(w.Bytes(), signers) } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index e62a21b3f..3f3a66cb6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" @@ -25,15 +24,10 @@ import ( // Client represents N3 client interface capable of test-invoking scripts // and sending signed transactions to chain. type Client interface { - invoker.RPCInvoke + actor.RPCActor - GetBlockCount() (uint32, error) GetNativeContracts() ([]state.Contract, error) GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error) - GetVersion() (*result.Version, error) - SendRawTransaction(*transaction.Transaction) (util.Uint256, error) - GetCommittee() (keys.PublicKeys, error) - CalculateNetworkFee(tx *transaction.Transaction) (int64, error) } type HashVUBPair struct { @@ -48,7 +42,7 @@ type ClientContext struct { SentTxs []HashVUBPair } -func GetN3Client(v *viper.Viper) (Client, error) { +func NewRemoteClient(v *viper.Viper) (Client, error) { // number of opened connections // by neo-go client per one host const ( @@ -88,8 +82,14 @@ func GetN3Client(v *viper.Viper) (Client, error) { return c, nil } -func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { - commAct, err := NewActor(c, committeeAcc) +func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { + commAct, err := actor.New(c, []actor.SignerAccount{{ + Signer: transaction.Signer{ + Account: committeeAcc.Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: committeeAcc, + }}) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go index fb8f03783..20abaff0a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go @@ -3,6 +3,7 @@ package helper import ( "errors" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -118,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error { return err } for k, v := range m { - for _, key := range NetmapConfigKeys { - if k == key { - md[k] = v - break - } + if slices.Contains(NetmapConfigKeys, k) { + md[k] = v } } return nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index 8c6b90539..be6b2c6dd 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -14,16 +14,36 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" + "github.com/nspcc-dev/neo-go/cli/input" "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/viper" ) +func getRegularWallet(walletPath string) (*wallet.Wallet, error) { + w, err := wallet.NewWalletFromFile(walletPath) + if err != nil { + return nil, err + } + + password, err := input.ReadPassword("Enter password for wallet:") + if err != nil { + return nil, fmt.Errorf("can't fetch password: %w", err) + } + + for i := range w.Accounts { + if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { + err = fmt.Errorf("can't unlock wallet: %w", err) + break + } + } + + return w, err +} + func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { wallets, err := openAlphabetWallets(v, walletDir) if err != nil { @@ -53,7 +73,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er if errors.Is(err, os.ErrNotExist) { err = nil } else { - err = fmt.Errorf("can't open wallet: %w", err) + err = fmt.Errorf("can't open alphabet wallet: %w", err) } break } @@ -87,16 +107,6 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er return wallets, nil } -func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) { - return actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: committeeAcc.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: committeeAcc, - }}) -} - func ReadContract(ctrPath, ctrName string) (*ContractState, error) { rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef")) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go index e127ca545..176356378 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -111,9 +112,7 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - if w.Err != nil { - panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err)) - } + assert.NoError(w.Err, "can't wrap register script") } func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go 
b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 4c6607f9a..7b7597d91 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -1,21 +1,18 @@ package initialize import ( - "errors" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -30,7 +27,8 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - regPrice, err := getCandidateRegisterPrice(c) + reader := neo.NewReader(c.ReadOnlyInvoker) + regPrice, err := reader.GetRegisterPrice() if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } @@ -42,9 +40,7 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error { emit.Opcodes(w.BinWriter, opcode.ASSERT) } emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - if w.Err != nil { - panic(fmt.Sprintf("BUG: %v", w.Err)) - } + assert.NoError(w.Err) signers := []actor.SignerAccount{{ Signer: c.GetSigner(false, c.CommitteeAcc), @@ -116,7 +112,7 @@ func registerCandidates(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { neoHash := neo.Hash - ok, err := transferNEOFinished(c, neoHash) + ok, err := transferNEOFinished(c) if ok || err != nil { return err } @@ -139,33 +135,8 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { return c.AwaitTx() } -func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) { - r := nep17.NewReader(c.ReadOnlyInvoker, neoHash) +func transferNEOFinished(c *helper.InitializeContext) (bool, error) { + r := neo.NewReader(c.ReadOnlyInvoker) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } - -var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") - -func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) { - switch c.Client.(type) { - case *rpcclient.Client: - inv := invoker.New(c.Client, nil) - reader := neo.NewReader(inv) - return reader.GetRegisterPrice() - default: - neoHash := neo.Hash - res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) - if err != nil { - return 0, err - } - if len(res.Stack) == 0 { - return 0, errGetPriceInvalid - } - bi, err := res.Stack[0].TryInteger() - if err != nil || !bi.IsInt64() { - return 0, errGetPriceInvalid - } - return bi.Int64(), nil - } -} diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index 7f1bfee2b..bb684b3a9 100644 --- 
a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -22,15 +22,14 @@ import ( ) const ( - gasInitialTotalSupply = 30000000 * native.GASFactor // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. initialAlphabetGASAmount = 10_000 * native.GASFactor // initialProxyGASAmount represents the amount of GAS given to a proxy contract. initialProxyGASAmount = 50_000 * native.GASFactor ) -func initialCommitteeGASAmount(c *helper.InitializeContext) int64 { - return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 +func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { + return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 } func transferFunds(c *helper.InitializeContext) error { @@ -42,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error { return err } + version, err := c.Client.GetVersion() + if err != nil { + return err + } + var transfers []transferTarget for _, acc := range c.Accounts { to := acc.Contract.ScriptHash() @@ -59,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error { transferTarget{ Token: gas.Hash, Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: initialCommitteeGASAmount(c), + Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), }, transferTarget{ Token: neo.Hash, @@ -83,16 +87,23 @@ func transferFunds(c *helper.InitializeContext) error { // transferFundsFinished checks balances of accounts we transfer GAS to. // The stage is considered finished if the balance is greater than the half of what we need to transfer. func transferFundsFinished(c *helper.InitializeContext) (bool, error) { - acc := c.Accounts[0] - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - res, err := r.BalanceOf(acc.Contract.ScriptHash()) - if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 { + res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash()) + if err != nil { + return false, err + } + + version, err := c.Client.GetVersion() + if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { return false, err } res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) - return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err + if err != nil { + return false, err + } + + return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err } func transferGASToProxy(c *helper.InitializeContext) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go index d8471bb9a..a689e0ec1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go @@ -13,7 +13,7 @@ import ( ) func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) inv := invoker.New(c, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index 1668bb327..14f6eb390 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -6,7 
+6,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" + "github.com/spf13/viper" ) func initRegisterCmd() { @@ -19,6 +21,7 @@ func initRegisterCmd() { registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") + registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) } @@ -48,6 +51,7 @@ func initDeleteCmd() { deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) } @@ -62,3 +66,28 @@ func deleteDomain(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) cmd.Println("Domain deleted successfully") } + +func initSetAdminCmd() { + Cmd.AddCommand(setAdminCmd) + setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage) + _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath) + + _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag) +} + +func setAdmin(cmd *cobra.Command, _ []string) { + c, actor := nnsWriter(cmd) + + name, _ := cmd.Flags().GetString(nnsNameFlag) + w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath)) + commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err) + h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash()) + + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "Set admin error: %w", err) + cmd.Println("Set admin successfully") +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 29b0a24ae..e49f62256 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -1,7 +1,11 @@ package nns import ( + "errors" + client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -13,10 +17,35 @@ import ( func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { v := viper.GetViper() - c, err := helper.GetN3Client(v) + c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName) + alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) + walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath)) + adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath)) + + var ( + alphabet *helper.AlphabetWallets + regularWallets []*helper.RegularWallets + ) + + if alphabetWalletPath != "" { + alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName} + } + + if walletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) + } + + if adminWalletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath}) + } + + if alphabet == nil && regularWallets == nil { + commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) + } + + ac, err := helper.NewLocalActor(c, alphabet, regularWallets...) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) r := management.NewReader(ac.Invoker) @@ -26,7 +55,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { } func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) inv := invoker.New(c, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 09ed92ab3..9cb47356f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -19,6 +19,7 @@ func initAddRecordCmd() { addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) @@ -40,6 +41,7 @@ func initDelRecordsCmd() { delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) + delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) @@ -52,6 +54,7 @@ func initDelRecordCmd() { delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) _ = 
cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index 9bdeaccd9..bb84933c6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -39,6 +39,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: registerDomain, } @@ -48,6 +49,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: deleteDomain, } @@ -75,6 +77,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: addRecord, } @@ -92,6 +95,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecords, } @@ -101,9 +105,21 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecord, } + setAdminCmd = &cobra.Command{ + Use: "set-admin", + Short: "Sets admin for domain", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) + _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath)) + }, + Run: setAdmin, + } ) func init() { @@ -116,4 +132,5 @@ func init() { initGetRecordsCmd() initDelRecordsCmd() initDelRecordCmd() + initSetAdminCmd() } diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go index 9b213da4e..3435926c0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "math/big" - "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" @@ -41,7 +40,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error { } accHash := 
w.GetChangeAddress() - if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil { + addr, _ := cmd.Flags().GetString(walletAccountFlag) + if addr != "" { accHash, err = address.StringToUint160(addr) if err != nil { return fmt.Errorf("invalid address: %s", addr) @@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error { return fmt.Errorf("can't find account for %s", accHash) } - prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash)) + prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash)) pass, err := input.ReadPassword(prompt) if err != nil { return fmt.Errorf("can't get password: %v", err) @@ -73,23 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error { return err } - till := int64(defaultNotaryDepositLifetime) - tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag) - if err != nil { - return err - } - if tillStr != "" { - till, err = strconv.ParseInt(tillStr, 10, 64) - if err != nil || till <= 0 { - return errInvalidNotaryDepositLifetime - } + till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag) + if till <= 0 { + return errInvalidNotaryDepositLifetime } return transferGas(cmd, acc, accHash, gasAmount, till) } func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go index 497ff8ea1..d7be2e503 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/root.go @@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() { DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address") DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit") - DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks") + DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks") } func init() { diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go index 36547e22c..f2932e87c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go @@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error { } func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client:", err) inv := invoker.New(c, nil) @@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { buf := bytes.NewBuffer(nil) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee))) - _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte))) - _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice))) + _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee)) + _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte)) + _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", 
storagePrice)) _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go index cb575b657..24cda45a6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go @@ -20,23 +20,32 @@ const ( accountAddressFlag = "account" ) +func parseAddresses(cmd *cobra.Command) []util.Uint160 { + var addrs []util.Uint160 + + accs, _ := cmd.Flags().GetStringArray(accountAddressFlag) + for _, acc := range accs { + addr, err := address.StringToUint160(acc) + commonCmd.ExitOnErr(cmd, "invalid account: %w", err) + + addrs = append(addrs, addr) + } + return addrs +} + func addProxyAccount(cmd *cobra.Command, _ []string) { - acc, _ := cmd.Flags().GetString(accountAddressFlag) - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - err = processAccount(cmd, addr, "addAccount") + addrs := parseAddresses(cmd) + err := processAccount(cmd, addrs, "addAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } func removeProxyAccount(cmd *cobra.Command, _ []string) { - acc, _ := cmd.Flags().GetString(accountAddressFlag) - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - err = processAccount(cmd, addr, "removeAccount") + addrs := parseAddresses(cmd) + err := processAccount(cmd, addrs, "removeAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } -func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error { +func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error { wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) if err != nil { return fmt.Errorf("can't initialize context: %w", err) @@ -54,7 +63,9 @@ func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error } bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) + for _, addr := range addrs { + emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) + } if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go index 1854c8d2b..ad89af2b5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go @@ -29,13 +29,15 @@ var ( func initProxyAddAccount() { AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") + _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initProxyRemoveAccount() { RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") + _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go 
index defd898c8..cc8225c7a 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -5,9 +5,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" @@ -41,8 +41,8 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) - rootCmd.AddCommand(storagecfg.RootCmd) rootCmd.AddCommand(metabase.RootCmd) + rootCmd.AddCommand(maintenance.RootCmd) rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go deleted file mode 100644 index 77183fb49..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go +++ /dev/null @@ -1,137 +0,0 @@ -package storagecfg - -const configTemplate = `logger: - level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" - -node: - wallet: - path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented - address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented - password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented - addresses: # list of addresses announced by Storage node in the Network map - - {{ .AnnouncedAddress }} - attribute_0: UN-LOCODE:{{ .Attribute.Locode }} - relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map - -grpc: - num: 1 # total number of listener endpoints - 0: - endpoint: {{ .Endpoint }} # endpoint for gRPC server - tls:{{if .TLSCert}} - enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2) - certificate: {{ .TLSCert }} # path to TLS certificate - key: {{ .TLSKey }} # path to TLS key - {{- else }} - enabled: false # disable TLS for a gRPC connection - {{- end}} - -control: - authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service - {{- range .AuthorizedKeys }} - - {{.}}{{end}} - grpc: - endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service - -morph: - dial_timeout: 20s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # use TTL cache for side chain GET operations - rpc_endpoint: # side chain N3 RPC endpoints - {{- range .MorphRPC }} - - address: wss://{{.}}/ws{{end}} -{{if not .Relay }} -storage: - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations - - shard: - default: # section with the default shard parameters - metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) - - blobstor: - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) - depth: 2 # max depth of object tree storage in FS - small_object_size: 102400 # 100KiB, 
size threshold for "small" objects which are stored in key-value DB, not in FS, bytes - compress: true # turn on/off Zstandard compression (level 3) of stored objects - compression_exclude_content_types: - - audio/* - - video/* - - blobovnicza: - size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - depth: 1 # max depth of object tree storage in key-value DB - width: 4 # max width of object tree storage in key-value DB - opened_cache_capacity: 50 # maximum number of opened database files - opened_cache_ttl: 5m # ttl for opened database file - opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - gc: - remover_batch_size: 200 # number of objects to be removed by the garbage collector - remover_sleep_interval: 5m # frequency of the garbage collector invocation - 0: - mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only" - - metabase: - path: {{ .MetabasePath }} # path to the metabase - - blobstor: - path: {{ .BlobstorPath }} # path to the blobstor -{{end}}` - -const ( - neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221" - balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55" - neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1" - balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf" -) - -var n3config = map[string]struct { - MorphRPC []string - RPC []string - NeoFSContract string - BalanceContract string -}{ - "testnet": { - MorphRPC: []string{ - "rpc01.morph.testnet.fs.neo.org:51331", - "rpc02.morph.testnet.fs.neo.org:51331", - "rpc03.morph.testnet.fs.neo.org:51331", - "rpc04.morph.testnet.fs.neo.org:51331", - "rpc05.morph.testnet.fs.neo.org:51331", - "rpc06.morph.testnet.fs.neo.org:51331", - "rpc07.morph.testnet.fs.neo.org:51331", - }, - RPC: []string{ - "rpc01.testnet.n3.nspcc.ru:21331", - "rpc02.testnet.n3.nspcc.ru:21331", - "rpc03.testnet.n3.nspcc.ru:21331", - "rpc04.testnet.n3.nspcc.ru:21331", - "rpc05.testnet.n3.nspcc.ru:21331", - "rpc06.testnet.n3.nspcc.ru:21331", - "rpc07.testnet.n3.nspcc.ru:21331", - }, - NeoFSContract: neofsTestnetAddress, - BalanceContract: balanceTestnetAddress, - }, - "mainnet": { - MorphRPC: []string{ - "rpc1.morph.fs.neo.org:40341", - "rpc2.morph.fs.neo.org:40341", - "rpc3.morph.fs.neo.org:40341", - "rpc4.morph.fs.neo.org:40341", - "rpc5.morph.fs.neo.org:40341", - "rpc6.morph.fs.neo.org:40341", - "rpc7.morph.fs.neo.org:40341", - }, - RPC: []string{ - "rpc1.n3.nspcc.ru:10331", - "rpc2.n3.nspcc.ru:10331", - "rpc3.n3.nspcc.ru:10331", - "rpc4.n3.nspcc.ru:10331", - "rpc5.n3.nspcc.ru:10331", - "rpc6.n3.nspcc.ru:10331", - "rpc7.n3.nspcc.ru:10331", - }, - NeoFSContract: neofsMainnetAddress, - BalanceContract: balanceMainnetAddress, - }, -} diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go deleted file mode 100644 index 127272da5..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ /dev/null @@ -1,433 +0,0 @@ -package storagecfg - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "fmt" - "math/rand" - "net" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "text/template" - "time" - - netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "github.com/chzyer/readline" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - 
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - - "github.com/spf13/cobra" -) - -const ( - walletFlag = "wallet" - accountFlag = "account" -) - -const ( - defaultControlEndpoint = "localhost:8090" - defaultDataEndpoint = "localhost" -) - -// RootCmd is a root command of config section. -var RootCmd = &cobra.Command{ - Use: "storage-config [-w wallet] [-a acccount] []", - Short: "Section for storage node configuration commands", - Run: storageConfig, -} - -func init() { - fs := RootCmd.Flags() - - fs.StringP(walletFlag, "w", "", "Path to wallet") - fs.StringP(accountFlag, "a", "", "Wallet account") -} - -type config struct { - AnnouncedAddress string - AuthorizedKeys []string - ControlEndpoint string - Endpoint string - TLSCert string - TLSKey string - MorphRPC []string - Attribute struct { - Locode string - } - Wallet struct { - Path string - Account string - Password string - } - Relay bool - BlobstorPath string - MetabasePath string -} - -func storageConfig(cmd *cobra.Command, args []string) { - outPath := getOutputPath(args) - - historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history") - readline.SetHistoryPath(historyPath) - - var c config - - c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag) - if c.Wallet.Path == "" { - c.Wallet.Path = getPath("Path to the storage node wallet: ") - } - - w, err := wallet.NewWalletFromFile(c.Wallet.Path) - fatalOnErr(err) - - fillWalletAccount(cmd, &c, w) - - accH, err := flags.ParseAddress(c.Wallet.Account) - fatalOnErr(err) - - acc := w.GetAccount(accH) - if acc == nil { - fatalOnErr(errors.New("can't find account in wallet")) - } - - c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account)) - fatalOnErr(err) - - err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) - fatalOnErr(err) - - c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes())) - - network := readNetwork(cmd) - - c.MorphRPC = n3config[network].MorphRPC - - depositGas(cmd, acc, network) - - c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ") - - endpoint := getDefaultEndpoint(cmd, &c) - c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint)) - if c.Endpoint == "" { - c.Endpoint = endpoint - } - - c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint)) - if c.ControlEndpoint == "" { - c.ControlEndpoint = defaultControlEndpoint - } - - c.TLSCert = getPath("TLS Certificate (optional): ") - if c.TLSCert != "" { - c.TLSKey = getPath("TLS Key: ") - } - - c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ") - if !c.Relay { - p := getPath("Path to the storage directory (all available storage will be used): ") - c.BlobstorPath = filepath.Join(p, "blob") - c.MetabasePath = filepath.Join(p, "meta") - } - - out := applyTemplate(c) - fatalOnErr(os.WriteFile(outPath, out, 0o644)) - - cmd.Println("Node is ready for work! 
Run `frostfs-node -config " + outPath + "`") -} - -func getDefaultEndpoint(cmd *cobra.Command, c *config) string { - var addr, port string - for { - c.AnnouncedAddress = getString("Publicly announced address: ") - validator := netutil.Address{} - err := validator.FromString(c.AnnouncedAddress) - if err != nil { - cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.") - continue - } - uriAddr, err := url.Parse(validator.URIAddr()) - if err != nil { - panic(fmt.Errorf("unexpected error: %w", err)) - } - addr = uriAddr.Hostname() - port = uriAddr.Port() - ip, err := net.ResolveIPAddr("ip", addr) - if err != nil { - cmd.Printf("Can't resolve IP address %s: %v\n", addr, err) - continue - } - - if !ip.IP.IsGlobalUnicast() { - cmd.Println("IP must be global unicast.") - continue - } - cmd.Printf("Resolved IP address: %s\n", ip.String()) - - _, err = strconv.ParseUint(port, 10, 16) - if err != nil { - cmd.Println("Port must be an integer.") - continue - } - - break - } - return net.JoinHostPort(defaultDataEndpoint, port) -} - -func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) { - c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag) - if c.Wallet.Account == "" { - addr := address.Uint160ToString(w.GetChangeAddress()) - c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr)) - if c.Wallet.Account == "" { - c.Wallet.Account = addr - } - } -} - -func readNetwork(cmd *cobra.Command) string { - var network string - for { - network = getString("Choose network [mainnet]/testnet: ") - switch network { - case "": - network = "mainnet" - case "testnet", "mainnet": - default: - cmd.Println(`Network must be either "mainnet" or "testnet"`) - continue - } - break - } - return network -} - -func getOutputPath(args []string) string { - if len(args) != 0 { - return args[0] - } - outPath := getPath("File to write config at [./config.yml]: ") - if outPath == "" { - outPath = "./config.yml" - } - return outPath -} - -func getWalletAccount(w *wallet.Wallet, prompt string) string { - addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts)) - for i := range w.Accounts { - addrs[i] = readline.PcItem(w.Accounts[i].Address) - } - - readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...)) - defer readline.SetAutoComplete(nil) - - s, err := readline.Line(prompt) - fatalOnErr(err) - return strings.TrimSpace(s) // autocompleter can return a string with a trailing space -} - -func getString(prompt string) string { - s, err := readline.Line(prompt) - fatalOnErr(err) - if s != "" { - _ = readline.AddHistory(s) - } - return s -} - -type filenameCompleter struct{} - -func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { - prefix := string(line[:pos]) - dir := filepath.Dir(prefix) - de, err := os.ReadDir(dir) - if err != nil { - return nil, 0 - } - - for i := range de { - name := filepath.Join(dir, de[i].Name()) - if strings.HasPrefix(name, prefix) { - tail := []rune(strings.TrimPrefix(name, prefix)) - if de[i].IsDir() { - tail = append(tail, filepath.Separator) - } - newLine = append(newLine, tail) - } - } - if pos != 0 { - return newLine, pos - len([]rune(dir)) - } - return newLine, 0 -} - -func getPath(prompt string) string { - readline.SetAutoComplete(filenameCompleter{}) - defer readline.SetAutoComplete(nil) - - p, err := readline.Line(prompt) - fatalOnErr(err) - - if p == "" { - return p - } - - _ = readline.AddHistory(p) - - abs, err 
:= filepath.Abs(p) - if err != nil { - fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err)) - } - - return abs -} - -func getConfirmation(def bool, prompt string) bool { - for { - s, err := readline.Line(prompt) - fatalOnErr(err) - - switch strings.ToLower(s) { - case "y", "yes": - return true - case "n", "no": - return false - default: - if len(s) == 0 { - return def - } - } - } -} - -func applyTemplate(c config) []byte { - tmpl, err := template.New("config").Parse(configTemplate) - fatalOnErr(err) - - b := bytes.NewBuffer(nil) - fatalOnErr(tmpl.Execute(b, c)) - - return b.Bytes() -} - -func fatalOnErr(err error) { - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) { - sideClient := initClient(n3config[network].MorphRPC) - balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract) - - sideActor, err := actor.NewSimple(sideClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err)) - } - - sideGas := nep17.NewReader(sideActor, balanceHash) - accSH := acc.Contract.ScriptHash() - - balance, err := sideGas.BalanceOf(accSH) - if err != nil { - fatalOnErr(fmt.Errorf("side chain balance: %w", err)) - } - - ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ", - fixedn.ToString(balance, 12))) - if !ok { - return - } - - amountStr := getString("Enter amount in GAS: ") - amount, err := fixedn.FromString(amountStr, 8) - if err != nil { - fatalOnErr(fmt.Errorf("invalid amount: %w", err)) - } - - mainClient := initClient(n3config[network].RPC) - neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract) - - mainActor, err := actor.NewSimple(mainClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err)) - } - - mainGas := nep17.New(mainActor, gas.Hash) - - txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil) - if err != nil { - fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err)) - } - - cmd.Print("Waiting for transactions to persist.") - tick := time.NewTicker(time.Second / 2) - defer tick.Stop() - - timer := time.NewTimer(time.Second * 20) - defer timer.Stop() - - at := trigger.Application - -loop: - for { - select { - case <-tick.C: - _, err := mainClient.GetApplicationLog(txHash, &at) - if err == nil { - cmd.Print("\n") - break loop - } - cmd.Print(".") - case <-timer.C: - cmd.Printf("\nTimeout while waiting for transaction to persist.\n") - if getConfirmation(false, "Continue configuration? 
yes/[no]: ") { - return - } - os.Exit(1) - } - } -} - -func initClient(rpc []string) *rpcclient.Client { - var c *rpcclient.Client - var err error - - shuffled := make([]string, len(rpc)) - copy(shuffled, rpc) - rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) - - for _, endpoint := range shuffled { - c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{ - DialTimeout: time.Second * 2, - RequestTimeout: time.Second * 5, - }) - if err != nil { - continue - } - if err = c.Init(); err != nil { - continue - } - return c - } - - fatalOnErr(fmt.Errorf("can't create N3 client: %w", err)) - panic("unreachable") -} diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 948d61f36..299d0a830 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -9,8 +9,6 @@ import ( "io" "os" "slices" - "sort" - "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -78,13 +76,29 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain // SortedIDList returns sorted list of identifiers of user's containers. func (x ListContainersRes) SortedIDList() []cid.ID { list := x.cliRes.Containers() - sort.Slice(list, func(i, j int) bool { - lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString() - return strings.Compare(lhs, rhs) < 0 - }) + slices.SortFunc(list, cid.ID.Cmp) return list } +func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) { + cliPrm := &client.PrmContainerListStream{ + XHeaders: prm.XHeaders, + OwnerID: prm.OwnerID, + Session: prm.Session, + } + rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm) + if err != nil { + return fmt.Errorf("init container list: %w", err) + } + + err = rdr.Iterate(processCnr) + if err != nil { + return fmt.Errorf("read container list: %w", err) + } + + return +} + // PutContainerPrm groups parameters of PutContainer operation. 
type PutContainerPrm struct { Client *client.Client @@ -670,9 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes return nil, fmt.Errorf("read object list: %w", err) } - slices.SortFunc(list, func(a, b oid.ID) int { - return strings.Compare(a.EncodeToString(), b.EncodeToString()) - }) + slices.SortFunc(list, oid.ID.Cmp) return &SearchObjectsRes{ ids: list, @@ -846,6 +858,8 @@ type PatchObjectPrm struct { ReplaceAttribute bool + NewSplitHeader *objectSDK.SplitHeader + PayloadPatches []PayloadPatch } @@ -876,7 +890,11 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { return nil, fmt.Errorf("init payload reading: %w", err) } - if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) { + if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ + NewSplitHeader: prm.NewSplitHeader, + NewAttributes: prm.NewAttributes, + ReplaceAttributes: prm.ReplaceAttribute, + }) { for _, pp := range prm.PayloadPatches { payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) if err != nil { diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 2d9c45cbd..1eadfa2e1 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go index 88321176f..6ed21e107 100644 --- a/cmd/frostfs-cli/internal/commonflags/api.go +++ b/cmd/frostfs-cli/internal/commonflags/api.go @@ -9,7 +9,7 @@ const ( TTL = "ttl" TTLShorthand = "" TTLDefault = 2 - TTLUsage = "TTL value in request meta header" + TTLUsage = "The maximum number of intermediate nodes in the request route" XHeadersKey = "xhdr" XHeadersShorthand = "x" diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go index cd46d63eb..fad1f6183 100644 --- a/cmd/frostfs-cli/internal/commonflags/flags.go +++ b/cmd/frostfs-cli/internal/commonflags/flags.go @@ -28,7 +28,7 @@ const ( RPC = "rpc-endpoint" RPCShorthand = "r" RPCDefault = "" - RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')" + RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')" Timeout = "timeout" TimeoutShorthand = "t" diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go index a86506c37..0927788ba 100644 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ b/cmd/frostfs-cli/modules/bearer/create.go @@ -44,6 +44,7 @@ is set to current epoch + n.
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) + _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) }, } @@ -81,7 +82,7 @@ func createToken(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) if iatRelative || expRelative || nvbRelative { - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) if len(endpoint) == 0 { commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) } diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index 13fe07995..9632061f1 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) { outputPath, _ := cmd.Flags().GetString(outputFlag) if outputPath != "" { - err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644) + err := os.WriteFile(outputPath, overrideMarshalled, 0o644) commonCmd.ExitOnErr(cmd, "dump error: %w", err) } else { fmt.Print("\n") diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go index 8c4ab14f8..fac6eb2cd 100644 --- a/cmd/frostfs-cli/modules/container/get.go +++ b/cmd/frostfs-cli/modules/container/get.go @@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod cmd.Println("created:", container.CreatedAt(cnr)) cmd.Println("attributes:") - cnr.IterateAttributes(func(key, val string) { + for key, val := range cnr.Attributes() { cmd.Printf("\t%s=%s\n", key, val) - }) + } cmd.Println("placement policy:") commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index f01e4db4d..e4a023d91 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -6,8 +6,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // flags of list command. 
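// The switch from IterateAttributes callbacks to range loops in the hunks above
// assumes the SDK now exposes Attributes() as a Go range-over-func iterator
// (roughly iter.Seq2[string, string]); a minimal sketch of the resulting pattern,
// under that assumption:
//
//	for key, val := range cnr.Attributes() {
//		cmd.Printf("\t%s=%s\n", key, val)
//	}
//
// Unlike the removed callback form, a range loop supports break and continue
// directly, which keeps the attribute-printing paths in get.go and list.go flat.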
@@ -51,44 +54,60 @@ var listContainersCmd = &cobra.Command{ var prm internalclient.ListContainersPrm prm.SetClient(cli) - prm.Account = idUser - - res, err := internalclient.ListContainers(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - + prm.OwnerID = idUser prmGet := internalclient.GetContainerPrm{ Client: cli, } + var containerIDs []cid.ID + + err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool { + printContainer(cmd, prmGet, id) + return false + }) + if err == nil { + return + } + + if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented { + res, err := internalclient.ListContainers(cmd.Context(), prm) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + containerIDs = res.SortedIDList() + } else { + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + } - containerIDs := res.SortedIDList() for _, cnrID := range containerIDs { - if flagVarListName == "" && !flagVarListPrintAttr { - cmd.Println(cnrID.String()) - continue - } - - prmGet.ClientParams.ContainerID = &cnrID - res, err := internalclient.GetContainer(cmd.Context(), prmGet) - if err != nil { - cmd.Printf(" failed to read attributes: %v\n", err) - continue - } - - cnr := res.Container() - if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { - continue - } - cmd.Println(cnrID.String()) - - if flagVarListPrintAttr { - cnr.IterateUserAttributes(func(key, val string) { - cmd.Printf(" %s: %s\n", key, val) - }) - } + printContainer(cmd, prmGet, cnrID) } }, } +func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) { + if flagVarListName == "" && !flagVarListPrintAttr { + cmd.Println(id.String()) + return + } + + prmGet.ClientParams.ContainerID = &id + res, err := internalclient.GetContainer(cmd.Context(), prmGet) + if err != nil { + cmd.Printf(" failed to read attributes: %v\n", err) + return + } + + cnr := res.Container() + if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { + return + } + cmd.Println(id.String()) + + if flagVarListPrintAttr { + for key, val := range cnr.Attributes() { + cmd.Printf(" %s: %s\n", key, val) + } + } +} + func initContainerListContainersCmd() { commonflags.Init(listContainersCmd) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 40bd4110b..cf4862b4a 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -5,7 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "maps" "os" + "slices" "strings" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -19,15 +21,16 @@ import ( ) type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo + cmd *cobra.Command + nodes map[string]netmap.NodeInfo + console *readline.Instance } -func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) { +func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { return &policyPlaygroundREPL{ cmd: cmd, nodes: map[string]netmap.NodeInfo{}, - }, nil + } } func (repl *policyPlaygroundREPL) handleLs(args []string) error { @@ -37,10 +40,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { i := 1 for id, node := range repl.nodes { var attrs []string - node.IterateAttributes(func(k, v string) { + for k, v := range node.Attributes() { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - }) 
- fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) + } + fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } return nil @@ -147,12 +150,29 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { for _, node := range ns { ids = append(ids, hex.EncodeToString(node.PublicKey())) } - fmt.Printf("\t%2d: %v\n", i+1, ids) + fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) } return nil } +func (repl *policyPlaygroundREPL) handleHelp(args []string) error { + if len(args) != 0 { + if _, ok := commands[args[0]]; !ok { + return fmt.Errorf("unknown command: %q", args[0]) + } + fmt.Fprintln(repl.console, commands[args[0]].usage) + return nil + } + + commandList := slices.Collect(maps.Keys(commands)) + slices.Sort(commandList) + for _, command := range commandList { + fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].description) + } + return nil +} + func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { var nm netmap.NetMap var nodes []netmap.NodeInfo @@ -163,15 +183,104 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { return nm } -var policyPlaygroundCompleter = readline.NewPrefixCompleter( - readline.PcItem("list"), - readline.PcItem("ls"), - readline.PcItem("add"), - readline.PcItem("load"), - readline.PcItem("remove"), - readline.PcItem("rm"), - readline.PcItem("eval"), -) +type commandDescription struct { + description string + usage string +} + +var commands = map[string]commandDescription{ + "list": { + description: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap +Example of usage: + list + 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} + 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} +`, + }, + + "ls": { + description: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap +Example of usage: + ls + 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} + 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} +`, + }, + + "add": { + description: "Add a new node: add <node-hash> attr=value", + usage: `Add a new node +Example of usage: + add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, + }, + + "load": { + description: "Load netmap from file: load <path>", + usage: `Load netmap from file +Example of usage: + load "netmap.json" +File format (netmap.json): +{ + "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": { + "continent": "Europe", + "country": "Poland" + }, + "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": { + "continent": "Antarctica", + "country": "Heard Island" + } +}`, + }, + + "remove": { + description: "Remove a node: remove <node-hash>", + usage: `Remove a node +Example of usage: + remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "rm": { + description: "Remove a node: rm <node-hash>", + usage: `Remove a node +Example of usage: + rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "eval": { + description: "Evaluate a policy: eval <policy>", + usage: `Evaluate a policy +Example of usage: + eval REP 2`, + }, + + "help": { + description: "Show available commands", + }, +} + +func (repl *policyPlaygroundREPL) handleCommand(args []string)
error { + if len(args) == 0 { + return nil + } + + switch args[0] { + case "list", "ls": + return repl.handleLs(args[1:]) + case "add": + return repl.handleAdd(args[1:]) + case "load": + return repl.handleLoad(args[1:]) + case "remove", "rm": + return repl.handleRemove(args[1:]) + case "eval": + return repl.handleEval(args[1:]) + case "help": + return repl.handleHelp(args[1:]) + } + return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0]) +} func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { @@ -190,24 +299,32 @@ func (repl *policyPlaygroundREPL) run() error { } } - cmdHandlers := map[string]func([]string) error{ - "list": repl.handleLs, - "ls": repl.handleLs, - "add": repl.handleAdd, - "load": repl.handleLoad, - "remove": repl.handleRemove, - "rm": repl.handleRemove, - "eval": repl.handleEval, + if len(viper.GetString(netmapConfigPath)) > 0 { + err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) + commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) } + var cfgCompleter []readline.PrefixCompleterInterface + var helpSubItems []readline.PrefixCompleterInterface + + for name := range commands { + if name != "help" { + cfgCompleter = append(cfgCompleter, readline.PcItem(name)) + helpSubItems = append(helpSubItems, readline.PcItem(name)) + } + } + + cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) + completer := readline.NewPrefixCompleter(cfgCompleter...) rl, err := readline.NewEx(&readline.Config{ Prompt: "> ", InterruptPrompt: "^C", - AutoComplete: policyPlaygroundCompleter, + AutoComplete: completer, }) if err != nil { return fmt.Errorf("error initializing readline: %w", err) } + repl.console = rl defer rl.Close() var exit bool @@ -225,17 +342,8 @@ func (repl *policyPlaygroundREPL) run() error { } exit = false - parts := strings.Fields(line) - if len(parts) == 0 { - continue - } - cmd := parts[0] - if handler, exists := cmdHandlers[cmd]; exists { - if err := handler(parts[1:]); err != nil { - fmt.Printf("error: %v\n", err) - } - } else { - fmt.Printf("error: unknown command %q\n", cmd) + if err := repl.handleCommand(strings.Fields(line)); err != nil { + fmt.Fprintf(repl.console, "error: %v\n", err) } } } @@ -246,12 +354,19 @@ var policyPlaygroundCmd = &cobra.Command{ Long: `A REPL for testing placement policies. If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. 
Otherwise, an empty playground is created.`, Run: func(cmd *cobra.Command, _ []string) { - repl, err := newPolicyPlaygroundREPL(cmd) - commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err) + repl := newPolicyPlaygroundREPL(cmd) commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run()) }, } +const ( + netmapConfigPath = "netmap-config" + netmapConfigUsage = "Path to the netmap configuration file" +) + func initContainerPolicyPlaygroundCmd() { commonflags.Init(policyPlaygroundCmd) + policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) + + _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) } diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go deleted file mode 100644 index 1e48c1df4..000000000 --- a/cmd/frostfs-cli/modules/control/evacuate_shard.go +++ /dev/null @@ -1,56 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -const ignoreErrorsFlag = "no-errors" - -var evacuateShardCmd = &cobra.Command{ - Use: "evacuate", - Short: "Evacuate objects from shard", - Long: "Evacuate objects from shard to other shards", - Run: evacuateShard, - Deprecated: "use frostfs-cli control shards evacuation start", -} - -func evacuateShard(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)} - req.Body.Shard_ID = getShardIDList(cmd) - req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.EvacuateShardResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.EvacuateShard(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount()) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard has successfully been evacuated.") -} - -func initControlEvacuateShardCmd() { - initControlFlags(evacuateShardCmd) - - flags := evacuateShardCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - flags.Bool(shardAllFlag, false, "Process all shards") - flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects") - - evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 73700e56d..b8d7eb046 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -17,10 +17,11 @@ import ( ) const ( - awaitFlag = "await" - noProgressFlag = "no-progress" - scopeFlag = "scope" - repOneOnlyFlag = "rep-one-only" + awaitFlag = "await" + noProgressFlag = "no-progress" + scopeFlag = "scope" + repOneOnlyFlag = "rep-one-only" + ignoreErrorsFlag = "no-errors" containerWorkerCountFlag = "container-worker-count" objectWorkerCountFlag = "object-worker-count" @@ -295,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftMinutes := int(leftSeconds / 
60) - sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes)) + fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) } func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -304,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR hour := int(duration.Seconds() / 3600) minute := int(duration.Seconds()/60) % 60 second := int(duration.Seconds()) % 60 - sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second)) + fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) } } func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if resp.GetBody().GetStartedAt() != nil { startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339))) + fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) } } func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if len(resp.GetBody().GetErrorMessage()) > 0 { - sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage())) + fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) } } @@ -331,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes default: status = "undefined" } - sb.WriteString(fmt.Sprintf(" Status: %s.", status)) + fmt.Fprintf(sb, " Status: %s.", status) } func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -349,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR } func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", + fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetTotalObjects(), resp.GetBody().GetFailedObjects(), resp.GetBody().GetSkippedObjects(), resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetTotalTrees(), - resp.GetBody().GetFailedTrees())) + resp.GetBody().GetFailedTrees()) } func initControlEvacuationShardCmd() { diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 8bd2dc9cd..3142d02e7 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) { tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) _, _ = tw.Write([]byte("#\tName\tType\n")) for i, t := range targets { - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) } _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go new file mode 100644 index 000000000..4cb4be539 --- /dev/null +++ b/cmd/frostfs-cli/modules/control/locate.go @@ -0,0 +1,117 @@ +package control + +import ( + "bytes" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + object 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/mr-tron/base58" + "github.com/spf13/cobra" +) + +const ( + FullInfoFlag = "full" + FullInfoFlagUsage = "Print full ShardInfo." +) + +var locateObjectCmd = &cobra.Command{ + Use: "locate-object", + Short: "List shards storing the object", + Long: "List shards storing the object", + Run: locateObject, +} + +func initControlLocateObjectCmd() { + initControlFlags(locateObjectCmd) + + flags := locateObjectCmd.Flags() + + flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag) + + flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag) + + flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.") + flags.Bool(FullInfoFlag, false, FullInfoFlagUsage) +} + +func locateObject(cmd *cobra.Command, _ []string) { + var cnr cid.ID + var obj oid.ID + + _ = object.ReadObjectAddress(cmd, &cnr, &obj) + + pk := key.Get(cmd) + + body := new(control.ListShardsForObjectRequest_Body) + body.SetContainerId(cnr.EncodeToString()) + body.SetObjectId(obj.EncodeToString()) + req := new(control.ListShardsForObjectRequest) + req.SetBody(body) + signRequest(cmd, pk, req) + + cli := getClient(cmd, pk) + + var err error + var resp *control.ListShardsForObjectResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + resp, err = control.ListShardsForObject(client, req) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + shardIDs := resp.GetBody().GetShard_ID() + + isFull, _ := cmd.Flags().GetBool(FullInfoFlag) + if !isFull { + for _, id := range shardIDs { + cmd.Println(base58.Encode(id)) + } + return + } + + // get full shard info + listShardsReq := new(control.ListShardsRequest) + listShardsReq.SetBody(new(control.ListShardsRequest_Body)) + signRequest(cmd, pk, listShardsReq) + var listShardsResp *control.ListShardsResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + listShardsResp, err = control.ListShards(client, listShardsReq) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody()) + + shards := listShardsResp.GetBody().GetShards() + sortShardsByID(shards) + shards = filterShards(shards, shardIDs) + + isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) + if isJSON { + prettyPrintShardsJSON(cmd, shards) + } else { + prettyPrintShards(cmd, shards) + } +} + +func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo { + var res []control.ShardInfo + for _, id := range ids { + for _, inf := range info { + if bytes.Equal(inf.Shard_ID, id) { + res = append(res, inf) + } + } + } + return res +} diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go index b20d3618e..3abfe80cb 100644 --- a/cmd/frostfs-cli/modules/control/root.go +++ b/cmd/frostfs-cli/modules/control/root.go @@ -39,6 +39,7 @@ func init() { listRulesCmd, getRuleCmd, listTargetsCmd, + locateObjectCmd, ) 
initControlHealthCheckCmd() @@ -52,4 +53,5 @@ func init() { initControlListRulesCmd() initControGetRuleCmd() initControlListTargetsCmd() + initControlLocateObjectCmd() } diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go index 87c4f3b3d..26a1ba883 100644 --- a/cmd/frostfs-cli/modules/control/set_netmap_status.go +++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go @@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client. var resp *control.GetNetmapStatusResponse var err error err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.GetNetmapStatus(client, req) + resp, err = control.GetNetmapStatus(cmd.Context(), client, req) return err }) commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err) diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go index 329cb9100..3483f5d62 100644 --- a/cmd/frostfs-cli/modules/control/shards.go +++ b/cmd/frostfs-cli/modules/control/shards.go @@ -13,7 +13,6 @@ var shardsCmd = &cobra.Command{ func initControlShardsCmd() { shardsCmd.AddCommand(listShardsCmd) shardsCmd.AddCommand(setShardModeCmd) - shardsCmd.AddCommand(evacuateShardCmd) shardsCmd.AddCommand(evacuationShardCmd) shardsCmd.AddCommand(flushCacheCmd) shardsCmd.AddCommand(doctorCmd) @@ -23,7 +22,6 @@ func initControlShardsCmd() { initControlShardsListCmd() initControlSetShardModeCmd() - initControlEvacuateShardCmd() initControlEvacuationShardCmd() initControlFlushCacheCmd() initControlDoctorCmd() diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go index 80e4a0c87..d0c9a641b 100644 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ b/cmd/frostfs-cli/modules/control/writecache.go @@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{ var sealWritecacheShardCmd = &cobra.Command{ Use: "seal", Short: "Flush objects from write-cache and move write-cache to degraded read only mode.", - Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.", + Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", Run: sealWritecache, } diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index ae4bb329a..5da66dcd9 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -62,11 +62,11 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("state:", stateWord) - netmap.IterateNetworkEndpoints(i, func(s string) { + for s := range i.NetworkEndpoints() { cmd.Println("address:", s) - }) + } - i.IterateAttributes(func(key, value string) { + for key, value := range i.Attributes() { cmd.Printf("attribute: %s=%s\n", key, value) - }) + } } diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go index e4e9cddb8..08a9ac4c8 100644 --- a/cmd/frostfs-cli/modules/object/delete.go +++ b/cmd/frostfs-cli/modules/object/delete.go @@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) } - objAddr = readObjectAddress(cmd, &cnr, &obj) + objAddr = 
ReadObjectAddress(cmd, &cnr, &obj) } pk := key.GetOrGenerate(cmd) diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go index f1edccba2..7312f5384 100644 --- a/cmd/frostfs-cli/modules/object/get.go +++ b/cmd/frostfs-cli/modules/object/get.go @@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) filename := cmd.Flag(fileFlag).Value.String() out, closer := createOutWriter(cmd, filename) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index 26243e7e7..25df375d4 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/spf13/cobra" @@ -42,7 +41,9 @@ func initObjectHashCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String("range", "", "Range to take hash from in the form offset1:length1,...") + flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...") + _ = objectHashCmd.MarkFlagRequired("range") + flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'") flags.String(getRangeHashSaltFlag, "", "Salt in hex format") } @@ -51,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -66,36 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) { pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - tz := typ == hashTz - fullHash := len(ranges) == 0 - if fullHash { - var headPrm internalclient.HeadObjectPrm - headPrm.SetClient(cli) - Prepare(cmd, &headPrm) - headPrm.SetAddress(objAddr) - - // get hash of full payload through HEAD (may be user can do it through dedicated command?) 
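Several hunks in this section (evacuation.go, list_targets.go) swap `sb.WriteString(fmt.Sprintf(...))` for `fmt.Fprintf(sb, ...)` and `[]byte(fmt.Sprintf(...))` for `fmt.Appendf(nil, ...)`: both variants format directly into the destination instead of allocating an intermediate string. A runnable illustration of the two idioms:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Builder implements io.Writer, so Fprintf can format
	// straight into it without an intermediate string allocation.
	var sb strings.Builder
	fmt.Fprintf(&sb, " Duration: %02d:%02d:%02d.", 1, 2, 3)
	fmt.Println(sb.String())

	// fmt.Appendf (Go 1.19+) formats straight into a byte slice,
	// replacing the []byte(fmt.Sprintf(...)) round trip.
	buf := fmt.Appendf(nil, "%d\t%s\n", 0, "shard")
	fmt.Printf("%q\n", buf)
}
```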
- res, err := internalclient.HeadObject(cmd.Context(), headPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - var cs checksum.Checksum - var csSet bool - - if tz { - cs, csSet = res.Header().PayloadHomomorphicHash() - } else { - cs, csSet = res.Header().PayloadChecksum() - } - - if csSet { - cmd.Println(hex.EncodeToString(cs.Value())) - } else { - cmd.Println("Missing checksum in object header.") - } - - return - } - var hashPrm internalclient.HashPayloadRangesPrm hashPrm.SetClient(cli) Prepare(cmd, &hashPrm) @@ -104,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { hashPrm.SetSalt(salt) hashPrm.SetRanges(ranges) - if tz { + if typ == hashTz { hashPrm.TZ() } diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index 70c273443..97e996cad 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index 53dd01868..d67db9f0d 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -18,6 +18,7 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "github.com/spf13/viper" ) // object lock command. @@ -78,7 +79,7 @@ var objectLockCmd = &cobra.Command{ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index e6918dfc9..476238651 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -1,8 +1,6 @@ package object import ( - "bytes" - "cmp" "context" "crypto/ecdsa" "encoding/hex" @@ -51,6 +49,12 @@ type ecHeader struct { parent oid.ID } +type objectCounter struct { + sync.Mutex + total uint32 + isECcounted bool +} + type objectPlacement struct { requiredNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo @@ -59,6 +63,7 @@ type objectPlacement struct { type objectNodesResult struct { errors []error placements map[oid.ID]objectPlacement + total uint32 } type ObjNodesDataObject struct { @@ -104,23 +109,23 @@ func initObjectNodesCmd() { func objectNodes(cmd *cobra.Command, _ []string) { var cnrID cid.ID var objID oid.ID - readObjectAddress(cmd, &cnrID, &objID) + ReadObjectAddress(cmd, &cnrID, &objID) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - objects := getPhyObjects(cmd, cnrID, objID, cli, pk) + objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - getActualPlacement(cmd, netmap, pk, objects, result) + getActualPlacement(cmd, netmap, pk, objects, count, result) printPlacement(cmd, objID, objects, result) } -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, 
cli *client.Client, pk *ecdsa.PrivateKey) []phyObject { +func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -148,7 +153,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C parent: res.Header().ECHeader().Parent(), } } - return []phyObject{obj} + return []phyObject{obj}, 1 } var errSplitInfo *objectSDK.SplitInfoError @@ -158,29 +163,34 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError) + return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 } commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil + return nil, 0 } -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject { - members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead) +func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { + members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) + return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total } -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID { +func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { + var total int splitInfo := errSplitInfo.SplitInfo() if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { - return members + if total = len(members); total > 0 { + total-- // linking object is not data object + } + return members, total } if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return members + return members, len(members) } - return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + return members, len(members) } func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { @@ -323,7 +333,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem } placementBuilder := placement.NewNetworkMapBuilder(netmap) for _, object := range objects { - placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err) for repIdx, rep := range placement { numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() @@ -361,7 +371,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem placementObjectID = object.ecHeader.parent } placementBuilder := 
placement.NewNetworkMapBuilder(netmap) - placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err) for _, vector := range placement { @@ -386,8 +396,11 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem } } -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) { +func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { resultMtx := &sync.Mutex{} + counter := &objectCounter{ + total: uint32(count), + } candidates := getNodesToCheckObjectExistance(cmd, netmap, result) @@ -404,7 +417,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. for _, object := range objects { eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk) + stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) resultMtx.Lock() defer resultMtx.Unlock() if err == nil && stored { @@ -423,6 +436,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. } commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) + result.total = counter.total } func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { @@ -447,17 +461,11 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N var cli *client.Client var addresses []string if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { - candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) addresses = append(addresses, candidate.ExternalAddresses()...) } else { addresses = append(addresses, candidate.ExternalAddresses()...) 
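The nodeinfo.go and nodes.go hunks move from callback-style iteration (`IterateNetworkEndpoints`) to Go 1.23 range-over-function iterators, which can be ranged over directly or collected with `slices.AppendSeq`. A self-contained sketch of both uses; `endpoints` here is a stand-in for the SDK's `NodeInfo.NetworkEndpoints()`:

```go
package main

import (
	"fmt"
	"iter"
	"slices"
)

// endpoints returns a Go 1.23 iterator over a node's addresses
// instead of accepting a visitor callback.
func endpoints(addrs []string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, a := range addrs {
			if !yield(a) { // consumer broke out of the range loop
				return
			}
		}
	}
}

func main() {
	seq := endpoints([]string{"grpc://10.0.0.1:8080", "grpcs://10.0.0.1:8082"})

	// Range directly over the iterator, as prettyPrintNodeInfo now does.
	for s := range seq {
		fmt.Println("address:", s)
	}

	// Or collect into an existing slice, as createClient now does.
	all := slices.AppendSeq([]string{"external:9090"}, seq)
	fmt.Println(all)
}
```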
- candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) } var lastErr error @@ -481,7 +489,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N return cli, nil } -func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) { +func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -496,6 +504,14 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, res, err := internalclient.HeadObject(ctx, prmHead) if err == nil && res != nil { + if res.Header().ECHeader() != nil { + counter.Lock() + defer counter.Unlock() + if !counter.isECcounted { + counter.total *= res.Header().ECHeader().Total() + } + counter.isECcounted = true + } return true, nil } var notFound *apistatus.ObjectNotFound @@ -507,7 +523,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, } func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - normilizeObjectNodesResult(objects, result) if json, _ := cmd.Flags().GetBool(commonflags.JSON); json { printObjectNodesAsJSON(cmd, objID, objects, result) } else { @@ -515,36 +530,9 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul } } -func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) { - slices.SortFunc(objects, func(lhs, rhs phyObject) int { - if lhs.ecHeader == nil && rhs.ecHeader == nil { - return bytes.Compare(lhs.objectID[:], rhs.objectID[:]) - } - if lhs.ecHeader == nil { - return -1 - } - if rhs.ecHeader == nil { - return 1 - } - if lhs.ecHeader.parent == rhs.ecHeader.parent { - return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index) - } - return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:]) - }) - for _, obj := range objects { - op := result.placements[obj.objectID] - slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int { - return bytes.Compare(lhs.PublicKey(), rhs.PublicKey()) - }) - slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int { - return bytes.Compare(lhs.PublicKey(), rhs.PublicKey()) - }) - result.placements[obj.objectID] = op - } -} - func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects)) + fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) + fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects)) for _, object := range objects { fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go index 8f03885ab..ebbde76a2 100644 --- a/cmd/frostfs-cli/modules/object/patch.go +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -2,6 +2,7 @@ package object import ( "fmt" + "os" "strconv" "strings" @@ -9,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" 
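getActualPlacement now threads an `objectCounter` through the concurrent per-node existence checks: when a HEAD response reveals an EC header, the data-object total is multiplied by the EC chunk count exactly once, no matter how many goroutines observe it. Distilled to its concurrency core, under the assumption that `applyECTotal` is a hypothetical helper name for the guarded section:

```go
package main

import (
	"fmt"
	"sync"
)

// objectCounter mirrors the struct added in nodes.go: an embedded
// mutex plus a once-only flag for the EC multiplication.
type objectCounter struct {
	sync.Mutex
	total       uint32
	isECcounted bool
}

func (c *objectCounter) applyECTotal(chunks uint32) {
	c.Lock()
	defer c.Unlock()
	if !c.isECcounted {
		c.total *= chunks
		c.isECcounted = true
	}
}

func main() {
	c := &objectCounter{total: 3}
	var wg sync.WaitGroup
	for range 8 { // eight goroutines race to report the same EC info
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.applyECTotal(4)
		}()
	}
	wg.Wait()
	fmt.Println(c.total) // 12: multiplied once, not once per goroutine
}
```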
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -20,6 +22,7 @@ const ( replaceAttrsFlagName = "replace-attrs" rangeFlagName = "range" payloadFlagName = "payload" + splitHeaderFlagName = "split-header" ) var objectPatchCmd = &cobra.Command{ @@ -46,17 +49,18 @@ func initObjectPatchCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2") + flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2") flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length") flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") + flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header") } func patch(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeSlice(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -84,6 +88,8 @@ func patch(cmd *cobra.Command, _ []string) { prm.NewAttributes = newAttrs prm.ReplaceAttribute = replaceAttrs + prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd) + for i := range ranges { prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ Range: ranges[i], @@ -99,11 +105,9 @@ func patch(cmd *cobra.Command, _ []string) { } func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - var rawAttrs []string - - raw := cmd.Flag(newAttrsFlagName).Value.String() - if len(raw) != 0 { - rawAttrs = strings.Split(raw, ",") + rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName) + if err != nil { + return nil, err } attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes @@ -149,3 +153,22 @@ func patchPayloadPaths(cmd *cobra.Command) []string { v, _ := cmd.Flags().GetStringSlice(payloadFlagName) return v } + +func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader { + path, _ := cmd.Flags().GetString(splitHeaderFlagName) + if path == "" { + return nil + } + + data, err := os.ReadFile(path) + commonCmd.ExitOnErr(cmd, "read file error: %w", err) + + splitHdrV2 := new(objectV2.SplitHeader) + err = splitHdrV2.Unmarshal(data) + if err != nil { + err = splitHdrV2.UnmarshalJSON(data) + commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err) + } + + return objectSDK.NewSplitHeaderFromV2(splitHdrV2) +} diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go index affe9bbba..9e8a7cc6f 100644 --- a/cmd/frostfs-cli/modules/object/put.go +++ b/cmd/frostfs-cli/modules/object/put.go @@ -50,7 +50,7 @@ func initObjectPutCmd() { flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2") + flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2") flags.Bool("disable-filename", false, 
"Do not set well-known filename attribute") flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute") flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object") @@ -214,11 +214,9 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute { } func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - var rawAttrs []string - - raw := cmd.Flag("attributes").Value.String() - if len(raw) != 0 { - rawAttrs = strings.Split(raw, ",") + rawAttrs, err := cmd.Flags().GetStringSlice("attributes") + if err != nil { + return nil, err } attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go index ad4bc3d59..6ec508ae2 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -38,7 +38,7 @@ func initObjectRangeCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String("range", "", "Range to take data from in the form offset:length") + flags.StringSlice("range", nil, "Range to take data from in the form offset:length") flags.String(fileFlag, "", "File to write object payload to. Default: stdout.") flags.Bool(rawFlag, false, rawFlagDesc) } @@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) @@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool { if ok { toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) toProto, _ := cmd.Flags().GetBool("proto") - if !(toJSON || toProto) { + if !toJSON && !toProto { cmd.PrintErrln("Object is erasure-encoded, ec information received.") } printECInfo(cmd, errECInfo.ECInfo()) @@ -195,11 +195,10 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) { } func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) { - v := cmd.Flag("range").Value.String() - if len(v) == 0 { - return nil, nil + vs, err := cmd.Flags().GetStringSlice("range") + if len(vs) == 0 || err != nil { + return nil, err } - vs := strings.Split(v, ",") rs := make([]objectSDK.Range, len(vs)) for i := range vs { before, after, found := strings.Cut(vs[i], rangeSep) diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go index b090c9f8c..8e4e8b287 100644 --- a/cmd/frostfs-cli/modules/object/util.go +++ b/cmd/frostfs-cli/modules/object/util.go @@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string { return xs } -func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { +func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { readCID(cmd, cnr) readOID(cmd, obj) @@ -262,13 +262,8 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client if _, ok := dst.(*internal.DeleteObjectPrm); ok { common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - rels := collectObjectRelatives(cmd, cli, cnr, *obj) - - if len(rels) == 0 { - objs = []oid.ID{*obj} - } else { - objs = append(rels, *obj) - } + objs = collectObjectRelatives(cmd, cli, cnr, *obj) + objs = append(objs, *obj) } } diff --git a/cmd/frostfs-cli/modules/tree/client.go 
b/cmd/frostfs-cli/modules/tree/client.go index a70624ac8..d71a94b98 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -2,18 +2,19 @@ package tree import ( "context" + "crypto/tls" "fmt" - "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -32,23 +33,29 @@ func _client() (tree.TreeServiceClient, error) { return nil, err } + host, isTLS, err := client.ParseURI(netAddr.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), + grpc.WithTransportCredentials(creds), } - if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) + cc, err := grpc.NewClient(host, opts...) return tree.NewTreeServiceClient(cc), err } diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 09af08525..13a747ba6 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -4,11 +4,14 @@ import ( "context" "os" "os/signal" + "strconv" "syscall" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "github.com/spf13/cast" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -38,13 +41,33 @@ func reloadConfig() error { } cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) audit.Store(cfg.GetBool("audit.enabled")) + var logPrm logger.Prm err = logPrm.SetLevelString(cfg.GetString("logger.level")) if err != nil { return err } - logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + err = logPrm.SetTags(loggerTags()) + if err != nil { + return err + } + logger.UpdateLevelForTags(logPrm) - return logPrm.Reload() + return nil +} + +func loggerTags() [][]string { + var res [][]string + for i := 0; ; i++ { + var item []string + index := strconv.FormatInt(int64(i), 10) + names := cast.ToString(cfg.Get("logger.tags." 
+ index + ".names")) + if names == "" { + break + } + item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level"))) + res = append(res, item) + } + return res } func watchForSignal(ctx context.Context, cancel func()) { diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go index a8eef6010..dd70fc91c 100644 --- a/cmd/frostfs-ir/httpcomponent.go +++ b/cmd/frostfs-ir/httpcomponent.go @@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) { log.Info(ctx, c.name+" config updated") if err := c.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + zap.Error(err), ) } else { c.init(ctx) diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index e86c04b9e..799feb784 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -31,7 +31,6 @@ const ( var ( wg = new(sync.WaitGroup) intErr = make(chan error) // internal inner ring errors - logPrm = new(logger.Prm) innerRing *innerring.Server pprofCmp *pprofComponent metricsCmp *httpComponent @@ -70,6 +69,7 @@ func main() { metrics := irMetrics.NewInnerRingMetrics() + var logPrm logger.Prm err = logPrm.SetLevelString( cfg.GetString("logger.level"), ) @@ -80,10 +80,14 @@ func main() { exitErr(err) logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + err = logPrm.SetTags(loggerTags()) + exitErr(err) log, err = logger.NewLogger(logPrm) exitErr(err) + logger.UpdateLevelForTags(logPrm) + ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() @@ -119,12 +123,12 @@ func shutdown(ctx context.Context) { innerRing.Stop(ctx) if err := metricsCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + zap.Error(err), ) } if err := pprofCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go index 8e81d8b85..2aebcde7f 100644 --- a/cmd/frostfs-ir/pprof.go +++ b/cmd/frostfs-ir/pprof.go @@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) { log.Info(ctx, c.name+" config updated") if err := c.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error())) + zap.Error(err)) return } diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go index 5a41f945c..7b0e25f3d 100644 --- a/cmd/frostfs-lens/internal/meta/tui.go +++ b/cmd/frostfs-lens/internal/meta/tui.go @@ -2,13 +2,17 @@ package meta import ( "context" + "encoding/binary" + "errors" "fmt" common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" + schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "github.com/rivo/tview" "github.com/spf13/cobra" + "go.etcd.io/bbolt" ) var tuiCMD = &cobra.Command{ @@ -27,6 +31,11 @@ Available search filters: var initialPrompt string +var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{ + 2: schema.MetabaseParserV2, + 3: schema.MetabaseParserV3, +} + func init() { common.AddComponentPathFlag(tuiCMD, &vPath) @@ -49,12 +58,22 @@ func runTUI(cmd *cobra.Command) error { } defer 
db.Close() + schemaVersion, hasVersion := lookupSchemaVersion(cmd, db) + if !hasVersion { + return errors.New("couldn't detect schema version") + } + + metabaseParser, ok := parserPerSchemaVersion[schemaVersion] + if !ok { + return fmt.Errorf("unknown schema version %d", schemaVersion) + } + // Need if app was stopped with Ctrl-C. ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil) + ui := tui.NewUI(ctx, app, db, metabaseParser, nil) _ = ui.AddFilter("cid", tui.CIDParser, "CID") _ = ui.AddFilter("oid", tui.OIDParser, "OID") @@ -69,3 +88,31 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } + +var ( + shardInfoBucket = []byte{5} + versionRecord = []byte("version") +) + +func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) { + err := db.View(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(shardInfoBucket) + if bkt == nil { + return nil + } + rec := bkt.Get(versionRecord) + if rec == nil { + return nil + } + + version = binary.LittleEndian.Uint64(rec) + ok = true + + return nil + }) + if err != nil { + common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err)) + } + + return +} diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go index 9bad19032..077a68785 100644 --- a/cmd/frostfs-lens/internal/schema/common/schema.go +++ b/cmd/frostfs-lens/internal/schema/common/schema.go @@ -3,6 +3,8 @@ package common import ( "errors" "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) type FilterResult byte @@ -71,11 +73,7 @@ func (fp FallbackParser) ToParser() Parser { func (p Parser) ToFallbackParser() FallbackParser { return func(key, value []byte) (SchemaEntry, Parser) { entry, next, err := p(key, value) - if err != nil { - panic(fmt.Errorf( - "couldn't use that parser as a fallback parser, it returned an error: %w", err, - )) - } + assert.NoError(err, "couldn't use that parser as a fallback parser") return entry, next } } diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go index 24cc0e52d..4e6bbf08a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go @@ -80,10 +80,15 @@ var ( }, ) - UserAttributeParser = NewUserAttributeKeyBucketParser( + UserAttributeParserV2 = NewUserAttributeKeyBucketParser( NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), ) + UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys( + NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), + []string{"FilePath", "S3-Access-Box-CRDT-Name"}, + ) + PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ cidResolver: StrictResolver, oidResolver: StrictResolver, @@ -108,4 +113,14 @@ var ( cidResolver: StrictResolver, oidResolver: LenientResolver, }) + + ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }) + + ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) ) diff --git 
a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go index 2fb122940..42a24c594 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go @@ -22,27 +22,31 @@ const ( Split ContainerCounters ECInfo + ExpirationEpochToObject + ObjectToExpirationEpoch ) var x = map[Prefix]string{ - Graveyard: "Graveyard", - Garbage: "Garbage", - ToMoveIt: "To Move It", - ContainerVolume: "Container Volume", - Locked: "Locked", - ShardInfo: "Shard Info", - Primary: "Primary", - Lockers: "Lockers", - Tombstone: "Tombstone", - Small: "Small", - Root: "Root", - Owner: "Owner", - UserAttribute: "User Attribute", - PayloadHash: "Payload Hash", - Parent: "Parent", - Split: "Split", - ContainerCounters: "Container Counters", - ECInfo: "EC Info", + Graveyard: "Graveyard", + Garbage: "Garbage", + ToMoveIt: "To Move It", + ContainerVolume: "Container Volume", + Locked: "Locked", + ShardInfo: "Shard Info", + Primary: "Primary", + Lockers: "Lockers", + Tombstone: "Tombstone", + Small: "Small", + Root: "Root", + Owner: "Owner", + UserAttribute: "User Attribute", + PayloadHash: "Payload Hash", + Parent: "Parent", + Split: "Split", + ContainerCounters: "Container Counters", + ECInfo: "EC Info", + ExpirationEpochToObject: "Exp. Epoch to Object", + ObjectToExpirationEpoch: "Object to Exp. Epoch", } func (p Prefix) String() string { diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go index db90bddbd..62d126f88 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go @@ -9,7 +9,7 @@ import ( func (b *PrefixBucket) String() string { return common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ) } @@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string { return fmt.Sprintf( "%s CID %s", common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ), common.FormatSimple(b.id.String(), tcell.ColorAqua), ) @@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string { func (b *UserAttributeKeyBucket) String() string { return fmt.Sprintf("%s CID %s ATTR-KEY %s", common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ), common.FormatSimple( fmt.Sprintf("%-44s", b.id), tcell.ColorAqua, diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go index 82b47dd85..7355c3d9e 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go @@ -2,6 +2,7 @@ package buckets import ( "errors" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -57,10 +58,11 @@ var ( ) var ( - ErrNotBucket = errors.New("not a bucket") - ErrInvalidKeyLength = errors.New("invalid key length") - ErrInvalidValueLength = errors.New("invalid value length") - ErrInvalidPrefix = errors.New("invalid prefix") + ErrNotBucket = errors.New("not a bucket") + ErrInvalidKeyLength = errors.New("invalid key length") + 
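The lens TUI hunk earlier in this section stops assuming a single metabase layout: it reads the schema version out of the shard-info bucket and picks `MetabaseParserV2` or `MetabaseParserV3` from a map keyed by that version. A sketch of the version lookup itself, following the bucket and record names from the diff, with an extra length check on the record before decoding:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

// Bucket prefix and record name follow the metabase layout above.
var (
	shardInfoBucket = []byte{5}
	versionRecord   = []byte("version")
)

// schemaVersion reads the little-endian uint64 stored under
// shardInfo/version; ok reports whether the record exists.
func schemaVersion(db *bbolt.DB) (version uint64, ok bool, err error) {
	err = db.View(func(tx *bbolt.Tx) error {
		bkt := tx.Bucket(shardInfoBucket)
		if bkt == nil {
			return nil
		}
		if rec := bkt.Get(versionRecord); len(rec) == 8 {
			version = binary.LittleEndian.Uint64(rec)
			ok = true
		}
		return nil
	})
	return
}

func main() {
	db, err := bbolt.Open("meta.db", 0o600, nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	fmt.Println(schemaVersion(db))
}
```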
ErrInvalidValueLength = errors.New("invalid value length") + ErrInvalidPrefix = errors.New("invalid prefix") + ErrUnexpectedAttributeKey = errors.New("unexpected attribute key") ) func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { @@ -132,6 +134,10 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa } func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { + return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil) +} + +func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser { return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { if value != nil { return nil, nil, ErrNotBucket @@ -147,6 +153,11 @@ func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { return nil, nil, err } b.key = string(key[33:]) + + if len(keys) != 0 && !slices.Contains(keys, b.key) { + return nil, nil, ErrUnexpectedAttributeKey + } + return &b, next, nil } } diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go index ea095e207..4cc9e8765 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/parser.go +++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go @@ -5,7 +5,30 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" ) -var MetabaseParser = common.WithFallback( +var MetabaseParserV3 = common.WithFallback( + common.Any( + buckets.GraveyardParser, + buckets.GarbageParser, + buckets.ContainerVolumeParser, + buckets.LockedParser, + buckets.ShardInfoParser, + buckets.PrimaryParser, + buckets.LockersParser, + buckets.TombstoneParser, + buckets.SmallParser, + buckets.RootParser, + buckets.UserAttributeParserV3, + buckets.ParentParser, + buckets.SplitParser, + buckets.ContainerCountersParser, + buckets.ECInfoParser, + buckets.ExpirationEpochToObjectParser, + buckets.ObjectToExpirationEpochParser, + ), + common.RawParser.ToFallbackParser(), +) + +var MetabaseParserV2 = common.WithFallback( common.Any( buckets.GraveyardParser, buckets.GarbageParser, @@ -18,7 +41,7 @@ var MetabaseParser = common.WithFallback( buckets.SmallParser, buckets.RootParser, buckets.OwnerParser, - buckets.UserAttributeParser, + buckets.UserAttributeParserV2, buckets.PayloadHashParser, buckets.ParentParser, buckets.SplitParser, diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go index 2dda15b4f..477c4fc9d 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go @@ -63,3 +63,11 @@ func (r *ContainerCountersRecord) DetailedString() string { func (r *ECInfoRecord) DetailedString() string { return spew.Sdump(*r) } + +func (r *ExpirationEpochToObjectRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ObjectToExpirationEpochRecord) DetailedString() string { + return spew.Sdump(*r) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go index 880a7a8ff..e038911d7 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go @@ -143,3 +143,26 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult { return common.No } } + +func (r *ExpirationEpochToObjectRecord) Filter(typ string, val 
any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go index 1b070e2a0..5d846cb75 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go @@ -249,3 +249,45 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e } return &r, nil, nil } + +func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 72 { + return nil, nil, ErrInvalidKeyLength + } + + var ( + r ExpirationEpochToObjectRecord + err error + ) + + r.epoch = binary.BigEndian.Uint64(key[:8]) + if err = r.cnt.Decode(key[8:40]); err != nil { + return nil, nil, err + } + if err = r.obj.Decode(key[40:]); err != nil { + return nil, nil, err + } + + return &r, nil, nil +} + +func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + if len(value) != 8 { + return nil, nil, ErrInvalidValueLength + } + + var ( + r ObjectToExpirationEpochRecord + err error + ) + + if err = r.obj.Decode(key); err != nil { + return nil, nil, err + } + r.epoch = binary.LittleEndian.Uint64(value) + + return &r, nil, nil +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go index ec0ab8e1a..f71244625 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go @@ -2,6 +2,7 @@ package records import ( "fmt" + "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "github.com/gdamore/tcell/v2" @@ -133,3 +134,22 @@ func (r *ECInfoRecord) String() string { len(r.ids), ) } + +func (r *ExpirationEpochToObjectRecord) String() string { + return fmt.Sprintf( + "exp. epoch %s %c CID %s OID %s", + common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + ) +} + +func (r *ObjectToExpirationEpochRecord) String() string { + return fmt.Sprintf( + "OID %s %c exp. 
epoch %s", + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua), + ) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go index 34c1c29fd..0809cad1a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go @@ -79,4 +79,15 @@ type ( id oid.ID ids []oid.ID } + + ExpirationEpochToObjectRecord struct { + epoch uint64 + cnt cid.ID + obj oid.ID + } + + ObjectToExpirationEpochRecord struct { + obj oid.ID + epoch uint64 + } ) diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go index 7d70b27b2..3bfe2608b 100644 --- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go +++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go @@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, r.addr.SetContainer(cnr) r.addr.SetObject(obj) - r.data = value[:] + r.data = value return &r, nil, nil } diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go index 3f5088e7a..2d3b20792 100644 --- a/cmd/frostfs-lens/internal/tui/buckets.go +++ b/cmd/frostfs-lens/internal/tui/buckets.go @@ -124,10 +124,7 @@ func (v *BucketsView) loadNodeChildren( path := parentBucket.Path parser := parentBucket.NextParser - buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) - if err != nil { - return err - } + buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) for item := range buffer { if item.err != nil { @@ -135,6 +132,7 @@ func (v *BucketsView) loadNodeChildren( } bucket := item.val + var err error bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) if err != nil { return err @@ -180,10 +178,7 @@ func (v *BucketsView) bucketSatisfiesFilter( defer cancel() // Check the current bucket's nested buckets if exist - bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) - if err != nil { - return false, err - } + bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) for item := range bucketsBuffer { if item.err != nil { @@ -191,6 +186,7 @@ func (v *BucketsView) bucketSatisfiesFilter( } b := item.val + var err error b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) if err != nil { return false, err @@ -206,10 +202,7 @@ func (v *BucketsView) bucketSatisfiesFilter( } // Check the current bucket's nested records if exist - recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) - if err != nil { - return false, err - } + recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) for item := range recordsBuffer { if item.err != nil { @@ -217,6 +210,7 @@ func (v *BucketsView) bucketSatisfiesFilter( } r := item.val + var err error r.Entry, _, err = bucket.NextParser(r.Key, r.Value) if err != nil { return false, err diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go index d0cf611d4..94fa87f98 100644 --- a/cmd/frostfs-lens/internal/tui/db.go +++ b/cmd/frostfs-lens/internal/tui/db.go @@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) { func load[T any]( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, filter func(key, value []byte) bool, transform func(key, value []byte) T, -) (<-chan 
Item[T], error) { +) <-chan Item[T] { buffer := make(chan Item[T], bufferSize) go func() { @@ -77,13 +77,13 @@ func load[T any]( } }() - return buffer, nil + return buffer } func LoadBuckets( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, -) (<-chan Item[*Bucket], error) { - buffer, err := load( +) <-chan Item[*Bucket] { + buffer := load( ctx, db, path, bufferSize, func(_, value []byte) bool { return value == nil @@ -98,17 +98,14 @@ func LoadBuckets( } }, ) - if err != nil { - return nil, fmt.Errorf("can't start iterating bucket: %w", err) - } - return buffer, nil + return buffer } func LoadRecords( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, -) (<-chan Item[*Record], error) { - buffer, err := load( +) <-chan Item[*Record] { + buffer := load( ctx, db, path, bufferSize, func(_, value []byte) bool { return value != nil @@ -124,11 +121,8 @@ func LoadRecords( } }, ) - if err != nil { - return nil, fmt.Errorf("can't start iterating bucket: %w", err) - } - return buffer, nil + return buffer } // HasBuckets checks if a bucket has nested buckets. It relies on assumption @@ -137,24 +131,21 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) ctx, cancel := context.WithCancel(ctx) defer cancel() - buffer, err := load( + buffer := load( ctx, db, path, 1, nil, func(_, value []byte) []byte { return value }, ) - if err != nil { - return false, err - } x, ok := <-buffer if !ok { return false, nil } if x.err != nil { - return false, err + return false, x.err } if x.val != nil { - return false, err + return false, nil } return true, nil } diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go index 4fdf97119..471514e5d 100644 --- a/cmd/frostfs-lens/internal/tui/input.go +++ b/cmd/frostfs-lens/internal/tui/input.go @@ -1,6 +1,8 @@ package tui import ( + "slices" + "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -26,7 +28,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) { // Used history data for search prompt, so just make that data recent. if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] { - f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...) + f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1) f.history = append(f.history, s) } @@ -51,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo f.historyPointer++ // Stop iterating over history. if f.historyPointer == len(f.history) { - f.InputField.SetText(f.currentContent) + f.SetText(f.currentContent) return } - f.InputField.SetText(f.history[f.historyPointer]) + f.SetText(f.history[f.historyPointer]) case tcell.KeyUp: if len(f.history) == 0 { return } // Start iterating over history. if f.historyPointer == len(f.history) { - f.currentContent = f.InputField.GetText() + f.currentContent = f.GetText() } // End of history. if f.historyPointer == 0 { @@ -69,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo } // Iterate to least recent prompts. 
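tui/db.go's `load` helper no longer returns `(channel, error)`: the producer goroutine owns the iteration, and any failure is delivered in-band as an `Item` with a non-nil `err`. That is also what fixes the shadowed-`err` bugs in buckets.go and records.go, where the outer error was reported instead of the item's. The shape, reduced to a slice-backed producer (`Item` and `load` follow the diff's names; the bbolt plumbing is elided):

```go
package main

import (
	"context"
	"fmt"
)

type Item[T any] struct {
	val T
	err error
}

// load starts the producer goroutine and returns the receive side
// immediately; errors travel through the channel rather than as a
// second return value.
func load[T any](ctx context.Context, src []T, bufferSize int) <-chan Item[T] {
	buffer := make(chan Item[T], bufferSize)
	go func() {
		defer close(buffer)
		for _, v := range src {
			select {
			case <-ctx.Done(): // consumer cancelled, stop producing
				return
			case buffer <- Item[T]{val: v}:
			}
		}
	}()
	return buffer
}

func main() {
	for it := range load(context.Background(), []string{"a", "b", "c"}, 1) {
		if it.err != nil {
			fmt.Println("error:", it.err)
			break
		}
		fmt.Println(it.val)
	}
}
```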
f.historyPointer-- - f.InputField.SetText(f.history[f.historyPointer]) + f.SetText(f.history[f.historyPointer]) default: f.InputField.InputHandler()(event, func(tview.Primitive) {}) } diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go index 5f53ed287..a4d392ab3 100644 --- a/cmd/frostfs-lens/internal/tui/records.go +++ b/cmd/frostfs-lens/internal/tui/records.go @@ -8,6 +8,7 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -62,10 +63,7 @@ func (v *RecordsView) Mount(ctx context.Context) error { ctx, v.onUnmount = context.WithCancel(ctx) - tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) - if err != nil { - return err - } + tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) v.buffer = make(chan *Record, v.ui.loadBufferSize) go func() { @@ -73,11 +71,12 @@ func (v *RecordsView) Mount(ctx context.Context) error { for item := range tempBuffer { if item.err != nil { - v.ui.stopOnError(err) + v.ui.stopOnError(item.err) break } record := item.val + var err error record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) if err != nil { v.ui.stopOnError(err) @@ -96,9 +95,7 @@ func (v *RecordsView) Mount(ctx context.Context) error { } func (v *RecordsView) Unmount() { - if v.onUnmount == nil { - panic("try to unmount not mounted component") - } + assert.False(v.onUnmount == nil, "try to unmount not mounted component") v.onUnmount() v.onUnmount = nil } diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go index bcc082821..cc6b7859e 100644 --- a/cmd/frostfs-lens/internal/tui/ui.go +++ b/cmd/frostfs-lens/internal/tui/ui.go @@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { return } - switch ui.mountedPage.(type) { + switch v := ui.mountedPage.(type) { case *BucketsView: ui.moveNextPage(NewBucketsView(ui, res)) case *RecordsView: - bucket := ui.mountedPage.(*RecordsView).bucket + bucket := v.bucket ui.moveNextPage(NewRecordsView(ui, bucket, res)) } @@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) } - ui.Box.MouseHandler() + ui.MouseHandler() } func (ui *UI) WithPrompt(prompt string) error { diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index de3aed660..513314712 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -14,11 +14,12 @@ import ( func initAPEManagerService(c *cfg) { contractStorage := ape_contract.NewProxyVerificationContractStorage( morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.shared.key, + c.key, c.cfgMorph.proxyScriptHash, c.cfgObject.cfgAccessPolicyEngine.policyContractHash) execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage, + c.cfgMorph.client, apemanager.WithLogger(c.log)) sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc) auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit) diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go index 64c3beba7..ce8ae9662 100644 --- a/cmd/frostfs-node/attributes.go +++ b/cmd/frostfs-node/attributes.go @@ -6,9 +6,5 @@ import ( ) func parseAttributes(c *cfg) { - if nodeconfig.Relay(c.appCfg) { - return - } - fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, 
nodeconfig.Attributes(c.appCfg))) } diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index b90641799..e5df0a22d 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -1,22 +1,30 @@ package main import ( + "bytes" + "cmp" + "context" + "slices" "sync" + "sync/atomic" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/hashicorp/golang-lru/v2/simplelru" + "go.uber.org/zap" ) -type netValueReader[K any, V any] func(K) (V, error) +type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) type valueWithError[V any] struct { v V @@ -49,7 +57,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n // updates the value from the network on cache miss or by TTL. // // returned value should not be modified. -func (c *ttlNetCache[K, V]) get(key K) (V, error) { +func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { hit := false startedAt := time.Now() defer func() { @@ -71,7 +79,7 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) { return val.v, val.e } - v, err := c.netRdr(key) + v, err := c.netRdr(ctx, key) c.cache.Add(key, &valueWithError[V]{ v: v, @@ -109,55 +117,6 @@ func (c *ttlNetCache[K, V]) remove(key K) { hit = c.cache.Remove(key) } -// entity that provides LRU cache interface. -type lruNetCache struct { - cache *lru.Cache[uint64, *netmapSDK.NetMap] - - netRdr netValueReader[uint64, *netmapSDK.NetMap] - - metrics cacheMetrics -} - -// newNetworkLRUCache returns wrapper over netValueReader with LRU cache. -func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache { - cache, err := lru.New[uint64, *netmapSDK.NetMap](sz) - fatalOnErr(err) - - return &lruNetCache{ - cache: cache, - netRdr: netRdr, - metrics: metrics, - } -} - -// reads value by the key. -// -// updates the value from the network on cache miss. -// -// returned value should not be modified. -func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) { - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - val, ok := c.cache.Get(key) - if ok { - hit = true - return val, nil - } - - val, err := c.netRdr(key) - if err != nil { - return nil, err - } - - c.cache.Add(key, val) - - return val, nil -} - // wrapper over TTL cache of values read from the network // that implements container storage. 
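The netValueReader rework that follows threads a context.Context from the caller down to each sidechain read, while the TTL cache keeps memoizing errors next to values, so a failed fetch is answered from cache until the entry expires instead of retrying the network on every hit. A minimal sketch of that pattern, using the same expirable LRU the patch imports; every other name here is illustrative, not the node's API:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

// reader pulls a value from the network and may fail.
type reader[K comparable, V any] func(ctx context.Context, key K) (V, error)

// result memoizes the error together with the value, so a failed fetch
// is also served from cache until the TTL lets the entry expire.
type result[V any] struct {
	v V
	e error
}

type ttlCache[K comparable, V any] struct {
	lru *expirable.LRU[K, *result[V]]
	rdr reader[K, V]
}

func newTTLCache[K comparable, V any](size int, ttl time.Duration, rdr reader[K, V]) *ttlCache[K, V] {
	return &ttlCache[K, V]{
		lru: expirable.NewLRU[K, *result[V]](size, nil, ttl),
		rdr: rdr,
	}
}

func (c *ttlCache[K, V]) get(ctx context.Context, key K) (V, error) {
	if r, ok := c.lru.Get(key); ok {
		return r.v, r.e // hit: cached value or cached error
	}
	v, err := c.rdr(ctx, key) // miss: go to the network once
	c.lru.Add(key, &result[V]{v: v, e: err})
	return v, err
}

func main() {
	c := newTTLCache[string, int](10, time.Second, func(_ context.Context, k string) (int, error) {
		return len(k), nil
	})
	v, err := c.get(context.Background(), "container-id")
	fmt.Println(v, err) // 12 <nil>
}
```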
type ttlContainerStorage struct { @@ -166,11 +125,11 @@ type ttlContainerStorage struct { } func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { - lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) { - return v.Get(id) + lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { + return v.Get(ctx, id) }, metrics.NewCacheMetrics("container")) - lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) { - return v.DeletionInfo(id) + lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + return v.DeletionInfo(ctx, id) }, metrics.NewCacheMetrics("container_deletion_info")) return ttlContainerStorage{ @@ -188,43 +147,245 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) { // Get returns container value from the cache. If value is missing in the cache // or expired, then it returns value from side chain and updates the cache. -func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) { - return s.containerCache.get(cnr) +func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { + return s.containerCache.get(ctx, cnr) } -func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) { - return s.delInfoCache.get(cnr) +func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { + return s.delInfoCache.get(ctx, cnr) } type lruNetmapSource struct { netState netmap.State - cache *lruNetCache + client rawSource + cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]] + mtx sync.RWMutex + metrics cacheMetrics + log *logger.Logger + candidates atomic.Pointer[[]netmapSDK.NodeInfo] } -func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { +type rawSource interface { + GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) +} + +func newCachedNetmapStorage(ctx context.Context, log *logger.Logger, + netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration, +) netmap.Source { const netmapCacheSize = 10 - lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) { - return v.GetNetMapByEpoch(key) - }, metrics.NewCacheMetrics("netmap")) + cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil) + fatalOnErr(err) - return &lruNetmapSource{ - netState: s, - cache: lruNetmapCache, + src := &lruNetmapSource{ + netState: netState, + client: client, + cache: cache, + log: log, + metrics: metrics.NewCacheMetrics("netmap"), + } + + wg.Add(1) + go func() { + defer wg.Done() + src.updateCandidates(ctx, d) + }() + + return src +} + +// updateCandidates routine to merge netmap in cache with candidates list. 
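newCachedNetmapStorage keeps *atomic.Pointer[netmapSDK.NetMap] values in the LRU, so the background updater can swap a merged netmap in place while readers keep whatever snapshot they already loaded, and no cache lock is held during reads. A stripped-down sketch of that shape, with a toy netmap type; the real code merges candidate node states into a clone rather than replacing the map wholesale:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

type netmap struct{ nodes []string }

type source struct {
	mtx   sync.Mutex
	cache *simplelru.LRU[uint64, *atomic.Pointer[netmap]]
}

// refresh periodically rewrites every cached pointer in place; a reader
// that already called Load keeps a consistent snapshot of the old map.
func (s *source) refresh(ctx context.Context, d time.Duration, next func() []string) {
	t := time.NewTimer(d)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			s.mtx.Lock()
			vals := s.cache.Values() // copy of the current pointers
			s.mtx.Unlock()
			for _, p := range vals {
				p.Store(&netmap{nodes: next()}) // atomic in-place swap
			}
			t.Reset(d)
		}
	}
}

func main() {
	lru, _ := simplelru.NewLRU[uint64, *atomic.Pointer[netmap]](10, nil)
	s := &source{cache: lru}

	p := &atomic.Pointer[netmap]{}
	p.Store(&netmap{nodes: []string{"a"}})
	s.cache.Add(1, p)

	ctx, cancel := context.WithCancel(context.Background())
	go s.refresh(ctx, 10*time.Millisecond, func() []string { return []string{"a", "b"} })

	time.Sleep(100 * time.Millisecond)
	fmt.Println(p.Load().nodes) // [a b] once the updater has fired
	cancel()
}
```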
+func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) { + timer := time.NewTimer(d) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + newCandidates, err := s.client.GetCandidates(ctx) + if err != nil { + s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err)) + timer.Reset(d) + break + } + if len(newCandidates) == 0 { + s.candidates.Store(&newCandidates) + timer.Reset(d) + break + } + slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + return cmp.Compare(n1.Hash(), n2.Hash()) + }) + + // Check once state changed + v := s.candidates.Load() + if v == nil { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + timer.Reset(d) + break + } + ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) || + uint32(n1.Status()) != uint32(n2.Status()) || + slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 { + return 1 + } + ne1 := slices.Collect(n1.NetworkEndpoints()) + ne2 := slices.Collect(n2.NetworkEndpoints()) + return slices.Compare(ne1, ne2) + }) + if ret != 0 { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + } + timer.Reset(d) + } } } -func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff) +func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) { + s.mtx.Lock() + tmp := s.cache.Values() + s.mtx.Unlock() + for _, pointer := range tmp { + nm := pointer.Load() + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + nm = nm.Clone() + mergeNetmapWithCandidates(updates, nm) + pointer.Store(nm) + } + } } -func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(epoch) +// reads value by the key. +// +// updates the value from the network on cache miss. +// +// returned value should not be modified. +func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + hit := false + startedAt := time.Now() + defer func() { + s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) + }() + + s.mtx.RLock() + val, ok := s.cache.Get(key) + s.mtx.RUnlock() + if ok { + hit = true + return val.Load(), nil + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + val, ok = s.cache.Get(key) + if ok { + hit = true + return val.Load(), nil + } + + nm, err := s.client.GetNetMapByEpoch(ctx, key) + if err != nil { + return nil, err + } + v := s.candidates.Load() + if v != nil { + updates := getNetMapNodesToUpdate(nm, *v) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + } + + p := atomic.Pointer[netmapSDK.NetMap]{} + p.Store(nm) + s.cache.Add(key, &p) + + return nm, nil } -func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.cache.get(epoch) +// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates. +func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) { + for _, v := range updates { + if v.status != netmapSDK.UnspecifiedState { + nm.Nodes()[v.netmapIndex].SetStatus(v.status) + } + if v.externalAddresses != nil { + nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...) 
+ } + if v.endpoints != nil { + nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...) + } + } +} + +type nodeToUpdate struct { + netmapIndex int + status netmapSDK.NodeState + externalAddresses []string + endpoints []string +} + +// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates. +func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate { + var res []nodeToUpdate + for i := range nm.Nodes() { + for _, cnd := range candidates { + if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) { + var tmp nodeToUpdate + var update bool + + if cnd.Status() != nm.Nodes()[i].Status() && + (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) { + update = true + tmp.status = cnd.Status() + } + + externalAddresses := cnd.ExternalAddresses() + if externalAddresses != nil && + slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 { + update = true + tmp.externalAddresses = externalAddresses + } + + nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) + nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints()) + candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) + candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints()) + if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { + update = true + tmp.endpoints = candidateEndpoints + } + + if update { + tmp.netmapIndex = i + res = append(res, tmp) + } + + break + } + } + } + return res +} + +func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) +} + +func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + return s.getNetMapByEpoch(ctx, epoch) +} + +func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + val, err := s.get(ctx, epoch) if err != nil { return nil, err } @@ -232,7 +393,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, err return val, nil } -func (s *lruNetmapSource) Epoch() (uint64, error) { +func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { return s.netState.CurrentEpoch(), nil } @@ -240,7 +401,10 @@ type cachedIRFetcher struct { *ttlNetCache[struct{}, [][]byte] } -func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher { +func newCachedIRFetcher(f interface { + InnerRingKeys(ctx context.Context) ([][]byte, error) +}, +) cachedIRFetcher { const ( irFetcherCacheSize = 1 // we intend to store only one value @@ -254,8 +418,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached ) irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, - func(_ struct{}) ([][]byte, error) { - return f.InnerRingKeys() + func(ctx context.Context, _ struct{}) ([][]byte, error) { + return f.InnerRingKeys(ctx) }, metrics.NewCacheMetrics("ir_keys"), ) @@ -265,8 +429,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // the cache or expired, then it returns keys from side chain and updates // the cache. 
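getNetMapNodesToUpdate compares endpoint lists that the SDK exposes as iterators, so each side is materialized first; slices.AppendSeq into a pre-sized destination avoids growth reallocations. A self-contained sketch of that diffing step, with a toy node type standing in for netmapSDK.NodeInfo:

```go
package main

import (
	"fmt"
	"iter"
	"slices"
)

type node struct{ endpoints []string }

// NetworkEndpoints mimics the SDK's iterator-style accessor.
func (n node) NetworkEndpoints() iter.Seq[string] {
	return slices.Values(n.endpoints)
}

// endpointsChanged materializes both iterator sequences into pre-sized
// slices and compares them element-wise.
func endpointsChanged(current, candidate node) bool {
	cur := slices.AppendSeq(make([]string, 0, len(current.endpoints)), current.NetworkEndpoints())
	cnd := slices.AppendSeq(make([]string, 0, len(candidate.endpoints)), candidate.NetworkEndpoints())
	return slices.Compare(cur, cnd) != 0
}

func main() {
	a := node{endpoints: []string{"10.0.0.1:8080"}}
	b := node{endpoints: []string{"10.0.0.1:8080", "10.0.0.2:8080"}}
	fmt.Println(endpointsChanged(a, a)) // false
	fmt.Println(endpointsChanged(a, b)) // true: the candidate gained an endpoint
}
```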
-func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) { - val, err := f.get(struct{}{}) +func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { + val, err := f.get(ctx, struct{}{}) if err != nil { return nil, err } @@ -289,7 +453,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M } } -func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 { +func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { const ttl = time.Second * 30 hit := false @@ -311,7 +475,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 { c.mtx.Lock() size = c.lastSize if !c.lastUpdated.After(prevUpdated) { - size = c.src.MaxObjectSize() + size = c.src.MaxObjectSize(ctx) c.lastSize = size c.lastUpdated = time.Now() } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go index f8c324a2f..24286826f 100644 --- a/cmd/frostfs-node/cache_test.go +++ b/cmd/frostfs-node/cache_test.go @@ -1,10 +1,13 @@ package main import ( + "context" "errors" + "sync" "testing" "time" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/stretchr/testify/require" ) @@ -17,7 +20,7 @@ func TestTTLNetCache(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.Equal(t, ti, val) }) @@ -26,7 +29,7 @@ func TestTTLNetCache(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) time.Sleep(2 * ttlDuration) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.NotEqual(t, val, ti) }) @@ -35,20 +38,20 @@ func TestTTLNetCache(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) cache.remove(key) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.NotEqual(t, val, ti) }) t.Run("Test Cache Error", func(t *testing.T) { cache.set("error", time.Now(), errors.New("mock error")) - _, err := cache.get("error") + _, err := cache.get(context.Background(), "error") require.Error(t, err) require.Equal(t, "mock error", err.Error()) }) } -func testNetValueReader(key string) (time.Time, error) { +func testNetValueReader(_ context.Context, key string) (time.Time, error) { if key == "error" { return time.Now(), errors.New("mock error") } @@ -58,3 +61,75 @@ func testNetValueReader(key string) (time.Time, error) { type noopCacheMetricts struct{} func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} + +type rawSrc struct{} + +func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) { + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Online) + node0.SetExternalAddresses("1", "0") + node0.SetNetworkEndpoints("1", "0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Online) + node1.SetExternalAddresses("1", "0") + node1.SetNetworkEndpoints("1", "0") + + return []netmapSDK.NodeInfo{node0, node1}, nil +} + +func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm := netmapSDK.NetMap{} + nm.SetEpoch(1) + + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Maintenance) + node0.SetExternalAddresses("0") + node0.SetNetworkEndpoints("0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + 
node1.SetStatus(netmapSDK.Maintenance) + node1.SetExternalAddresses("0") + node1.SetNetworkEndpoints("0") + + nm.SetNodes([]netmapSDK.NodeInfo{node0, node1}) + + return &nm, nil +} + +type st struct{} + +func (s *st) CurrentEpoch() uint64 { + return 1 +} + +func TestNetmapStorage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + wg := sync.WaitGroup{} + cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50) + + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance) + require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1) + require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1) + + require.Eventually(t, func() bool { + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + for _, node := range nm.Nodes() { + if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 && + node.NumberOfNetworkEndpoints() == 2) { + return false + } + } + return true + }, time.Second*5, time.Millisecond*10) + + cancel() + wg.Wait() +} diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 5af37865f..96274e625 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -30,15 +30,18 @@ import ( objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -69,6 +72,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -106,6 +110,8 @@ type applicationConfiguration struct { level string destination string timestamp bool + options []zap.Option + tags [][]string } ObjectCfg struct { @@ -115,7 +121,6 @@ type applicationConfiguration struct { EngineCfg struct { errorThreshold uint32 - shardPoolSize uint32 shards []shardCfg lowMem bool } @@ -125,15 +130,13 @@ type applicationConfiguration 
struct { } type shardCfg struct { - compress bool - estimateCompressibility bool - estimateCompressibilityThreshold float64 + compression compression.Config smallSizeObjectLimit uint64 - uncompressableContentType []string refillMetabase bool refillMetabaseWorkersCount int mode shardmode.Mode + limiter qos.Limiter metaCfg struct { path string @@ -230,62 +233,71 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) + var opts []zap.Option + if loggerconfig.ToLokiConfig(c).Enabled { + opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { + lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c)) + return lokiCore + })} + } + a.LoggerCfg.options = opts + a.LoggerCfg.tags = loggerconfig.Tags(c) // Object a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) - var pm []placement.Metric - for _, raw := range objectconfig.Get(c).Priority() { - m, err := placement.ParseMetric(raw) - if err != nil { - return err - } - pm = append(pm, m) + locodeDBPath := nodeconfig.LocodeDBPath(c) + parser, err := placement.NewMetricsParser(locodeDBPath) + if err != nil { + return fmt.Errorf("metrics parser creation: %w", err) } - a.ObjectCfg.priorityMetrics = pm + m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) + if err != nil { + return fmt.Errorf("parse metrics: %w", err) + } + a.ObjectCfg.priorityMetrics = m // Storage Engine a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) - a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c) a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) } -func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error { - var newConfig shardCfg +func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { + var target shardCfg - newConfig.refillMetabase = oldConfig.RefillMetabase() - newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount() - newConfig.mode = oldConfig.Mode() - newConfig.compress = oldConfig.Compress() - newConfig.estimateCompressibility = oldConfig.EstimateCompressibility() - newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold() - newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes() - newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit() + target.refillMetabase = source.RefillMetabase() + target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() + target.mode = source.Mode() + target.compression = source.Compression() + target.smallSizeObjectLimit = source.SmallSizeLimit() - a.setShardWriteCacheConfig(&newConfig, oldConfig) + a.setShardWriteCacheConfig(&target, source) - a.setShardPiloramaConfig(c, &newConfig, oldConfig) + a.setShardPiloramaConfig(c, &target, source) - if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil { + if err := a.setShardStorageConfig(&target, source); err != nil { return err } - a.setMetabaseConfig(&newConfig, oldConfig) + a.setMetabaseConfig(&target, source) - a.setGCConfig(&newConfig, oldConfig) + a.setGCConfig(&target, source) + if err := a.setLimiter(&target, source); err != nil { + return err + } - a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig) + a.EngineCfg.shards 
= append(a.EngineCfg.shards, target) return nil } -func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - writeCacheCfg := oldConfig.WriteCache() +func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { + writeCacheCfg := source.WriteCache() if writeCacheCfg.Enabled() { - wc := &newConfig.writecacheCfg + wc := &target.writecacheCfg wc.enabled = true wc.path = writeCacheCfg.Path() @@ -298,10 +310,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, } } -func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) { +func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { if config.BoolSafe(c.Sub("tree"), "enabled") { - piloramaCfg := oldConfig.Pilorama() - pr := &newConfig.piloramaCfg + piloramaCfg := source.Pilorama() + pr := &target.piloramaCfg pr.enabled = true pr.path = piloramaCfg.Path() @@ -312,8 +324,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC } } -func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error { - blobStorCfg := oldConfig.BlobStor() +func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { + blobStorCfg := source.BlobStor() storagesCfg := blobStorCfg.Storages() ss := make([]subStorageCfg, 0, len(storagesCfg)) @@ -347,13 +359,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol ss = append(ss, sCfg) } - newConfig.subStorages = ss + target.subStorages = ss return nil } -func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - metabaseCfg := oldConfig.Metabase() - m := &newConfig.metaCfg +func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { + metabaseCfg := source.Metabase() + m := &target.metaCfg m.path = metabaseCfg.Path() m.perm = metabaseCfg.BoltDB().Perm() @@ -361,12 +373,22 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() } -func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - gcCfg := oldConfig.GC() - newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() - newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() - newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() - newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { + gcCfg := source.GC() + target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() + target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() + target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() + target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +} + +func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { + limitsConfig := source.Limits().ToConfig() + limiter, err := qos.NewLimiter(limitsConfig) + if err != nil { + return err + } + target.limiter = limiter + return nil } // internals contains application-specific internals that are created @@ -456,7 +478,6 @@ type shared struct { // 
dynamicConfiguration stores parameters of the // components that supports runtime reconfigurations. type dynamicConfiguration struct { - logger *logger.Prm pprof *httpComponent metrics *httpComponent } @@ -493,6 +514,7 @@ type cfg struct { cfgNetmap cfgNetmap cfgControlService cfgControlService cfgObject cfgObject + cfgQoSService cfgQoSService } // ReadCurrentNetMap reads network map which has been cached at the @@ -527,6 +549,8 @@ type cfgGRPC struct { maxChunkSize uint64 maxAddrAmount uint64 reconnectTimeout time.Duration + + limiter atomic.Pointer[limiting.SemaphoreLimiter] } func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { @@ -591,8 +615,6 @@ type cfgMorph struct { client *client.Client - notaryEnabled bool - // TTL of Sidechain cached values. Non-positive value disables caching. cacheTTL time.Duration @@ -608,9 +630,10 @@ type cfgAccounting struct { type cfgContainer struct { scriptHash neogoutil.Uint160 - parsers map[event.Type]event.NotificationParser - subscribers map[event.Type][]event.Handler - workerPool util.WorkerPool // pool for asynchronous handlers + parsers map[event.Type]event.NotificationParser + subscribers map[event.Type][]event.Handler + workerPool util.WorkerPool // pool for asynchronous handlers + containerBatchSize uint32 } type cfgFrostfsID struct { @@ -628,7 +651,6 @@ type cfgNetmap struct { state *networkState - needBootstrap bool reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime } @@ -664,10 +686,6 @@ type cfgAccessPolicyEngine struct { } type cfgObjectRoutines struct { - putRemote *ants.Pool - - putLocal *ants.Pool - replication *ants.Pool } @@ -691,11 +709,9 @@ func initCfg(appCfg *config.Config) *cfg { key := nodeconfig.Key(appCfg) - relayOnly := nodeconfig.Relay(appCfg) - netState := newNetworkState() - c.shared = initShared(appCfg, key, netState, relayOnly) + c.shared = initShared(appCfg, key, netState) netState.metrics = c.metricsCollector @@ -704,12 +720,7 @@ func initCfg(appCfg *config.Config) *cfg { logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) - if loggerconfig.ToLokiConfig(appCfg).Enabled { - log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { - lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg)) - return lokiCore - })) - } + logger.UpdateLevelForTags(logPrm) c.internals = initInternals(appCfg, log) @@ -720,7 +731,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgFrostfsID = initFrostfsID(appCfg) - c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) + c.cfgNetmap = initNetmap(appCfg, netState) c.cfgGRPC = initCfgGRPC() @@ -766,12 +777,8 @@ func initSdNotify(appCfg *config.Config) bool { return false } -func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared { - var netAddr network.AddressGroup - - if !relayOnly { - netAddr = nodeconfig.BootstrapAddresses(appCfg) - } +func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared { + netAddr := nodeconfig.BootstrapAddresses(appCfg) persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) @@ -822,18 +829,15 @@ func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) interna return result } -func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap { +func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap { netmapWorkerPool, err := 
ants.NewPool(notificationHandlerPoolSize) fatalOnErr(err) - var reBootstrapTurnedOff atomic.Bool - reBootstrapTurnedOff.Store(relayOnly) return cfgNetmap{ scriptHash: contractsconfig.Netmap(appCfg), state: netState, workerPool: netmapWorkerPool, - needBootstrap: !relayOnly, - reBoostrapTurnedOff: &reBootstrapTurnedOff, + reBoostrapTurnedOff: &atomic.Bool{}, } } @@ -853,14 +857,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID { } } -func initCfgGRPC() cfgGRPC { - maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload - maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes +func initCfgGRPC() (cfg cfgGRPC) { + maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload + maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes - return cfgGRPC{ - maxChunkSize: maxChunkSize, - maxAddrAmount: maxAddrAmount, - } + cfg.maxChunkSize = maxChunkSize + cfg.maxAddrAmount = maxAddrAmount + + return } func initCfgObject(appCfg *config.Config) cfgObject { @@ -877,9 +881,8 @@ func (c *cfg) engineOpts() []engine.Option { var opts []engine.Option opts = append(opts, - engine.WithShardPoolSize(c.EngineCfg.shardPoolSize), engine.WithErrorThreshold(c.EngineCfg.errorThreshold), - engine.WithLogger(c.log), + engine.WithLogger(c.log.WithTag(logger.TagEngine)), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), ) @@ -916,7 +919,8 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { writecache.WithMaxCacheSize(wcRead.sizeLimit), writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithNoSync(wcRead.noSync), - writecache.WithLogger(c.log), + writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), + writecache.WithQoSLimiter(shCfg.limiter), ) } return writeCacheOpts @@ -955,7 +959,8 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), - blobovniczatree.WithLogger(c.log), + blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), + blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)), blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), } @@ -978,7 +983,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. 
fstree.WithPerm(sRead.perm), fstree.WithDepth(sRead.depth), fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log), + fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), } if c.metricsCollector != nil { fstreeOpts = append(fstreeOpts, @@ -1008,12 +1013,9 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID ss := c.getSubstorageOpts(ctx, shCfg) blobstoreOpts := []blobstor.Option{ - blobstor.WithCompressObjects(shCfg.compress), - blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType), - blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility), - blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold), + blobstor.WithCompression(shCfg.compression), blobstor.WithStorages(ss), - blobstor.WithLogger(c.log), + blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)), } if c.metricsCollector != nil { blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore()))) @@ -1032,12 +1034,13 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID } if c.metricsCollector != nil { mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics()))) + shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics()) } var sh shardOptsWithID sh.configID = shCfg.id() sh.shOpts = []shard.Option{ - shard.WithLogger(c.log), + shard.WithLogger(c.log.WithTag(logger.TagShard)), shard.WithRefillMetabase(shCfg.refillMetabase), shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount), shard.WithMode(shCfg.mode), @@ -1056,30 +1059,33 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return pool }), + shard.WithLimiter(shCfg.limiter), } return sh } -func (c *cfg) loggerPrm() (*logger.Prm, error) { - // check if it has been inited before - if c.dynamicConfiguration.logger == nil { - c.dynamicConfiguration.logger = new(logger.Prm) - } - +func (c *cfg) loggerPrm() (logger.Prm, error) { + var prm logger.Prm // (re)init read configuration - err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level) + err := prm.SetLevelString(c.LoggerCfg.level) if err != nil { // not expected since validation should be performed before - panic("incorrect log level format: " + c.LoggerCfg.level) + return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level) } - err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination) + err = prm.SetDestination(c.LoggerCfg.destination) if err != nil { // not expected since validation should be performed before - panic("incorrect log destination format: " + c.LoggerCfg.destination) + return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination) + } + prm.PrependTimestamp = c.LoggerCfg.timestamp + prm.Options = c.LoggerCfg.options + err = prm.SetTags(c.LoggerCfg.tags) + if err != nil { + // not expected since validation should be performed before + return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination) } - c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp - return c.dynamicConfiguration.logger, nil + return prm, nil } func (c *cfg) LocalAddress() network.AddressGroup { @@ -1121,7 +1127,7 @@ func initLocalStorage(ctx context.Context, c *cfg) { err := ls.Close(context.WithoutCancel(ctx)) if err != nil { c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, - zap.String("error", 
err.Error()), + zap.Error(err), ) } else { c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) @@ -1148,7 +1154,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) { c.cfgObject.cfgAccessPolicyEngine.policyContractHash) cacheSize := morphconfig.APEChainCacheSize(c.appCfg) - if cacheSize > 0 { + if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) } @@ -1167,21 +1173,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) { func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { var err error - optNonBlocking := ants.WithNonblocking(true) - - putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote() - pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking) - fatalOnErr(err) - - putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal() - pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking) - fatalOnErr(err) - replicatorPoolSize := replicatorconfig.PoolSize(cfg) - if replicatorPoolSize <= 0 { - replicatorPoolSize = putRemoteCapacity - } - pool.replication, err = ants.NewPool(replicatorPoolSize) fatalOnErr(err) @@ -1207,11 +1199,11 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) { } func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { - ni, err := c.netmapLocalNodeState(epoch) + ni, err := c.netmapLocalNodeState(ctx, epoch) if err != nil { c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -1221,9 +1213,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // with the binary-encoded information from the current node's configuration. // The state is set using the provided setter which MUST NOT be nil. -func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error { +func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error { ni := c.cfgNodeInfo.localInfo - stateSetter(&ni) + ni.SetStatus(state) prm := nmClient.AddPeerPrm{} prm.SetNodeInfo(ni) @@ -1233,9 +1225,7 @@ func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.N // bootstrapOnline calls cfg.bootstrapWithState with "online" state. func bootstrapOnline(ctx context.Context, c *cfg) error { - return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { - ni.SetStatus(netmap.Online) - }) + return c.bootstrapWithState(ctx, netmap.Online) } // bootstrap calls bootstrapWithState with: @@ -1246,9 +1236,7 @@ func (c *cfg) bootstrap(ctx context.Context) error { st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) - return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { - ni.SetStatus(netmap.Maintenance) - }) + return c.bootstrapWithState(ctx, netmap.Maintenance) } c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, @@ -1258,11 +1246,6 @@ func (c *cfg) bootstrap(ctx context.Context) error { return bootstrapOnline(ctx, c) } -// needBootstrap checks if local node should be registered in network on bootup. 
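getComponents, a few hunks below, assembles the dCmp list that reloadConfig walks; each reloadable subsystem is just a named closure, which is why the new tree service and rpc_limiter entries are one-line appends. A minimal sketch of that registry pattern, with hypothetical components:

```go
package main

import "fmt"

type component struct {
	name   string
	reload func() error
}

// reloadAll walks the registry; a failing component is reported but
// must not block the remaining reloads.
func reloadAll(components []component) {
	for _, c := range components {
		if err := c.reload(); err != nil {
			fmt.Printf("reload %s: %v\n", c.name, err)
			continue
		}
		fmt.Printf("reloaded %s\n", c.name)
	}
}

func main() {
	level := "info"
	components := []component{
		{"logger", func() error { level = "debug"; return nil }},
		{"rpc_limiter", func() error { return fmt.Errorf("limits not ready") }},
	}
	reloadAll(components)
	fmt.Println("log level:", level)
}
```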
-func (c *cfg) needBootstrap() bool { - return c.cfgNetmap.needBootstrap -} - type dCmp struct { name string reloadFunc func() error @@ -1337,15 +1320,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { // all the components are expected to support // Logger's dynamic reconfiguration approach - // Logger - - logPrm, err := c.loggerPrm() - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) - return - } - - components := c.getComponents(ctx, logPrm) + components := c.getComponents(ctx) // Object c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) @@ -1383,10 +1358,17 @@ func (c *cfg) reloadConfig(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } -func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { +func (c *cfg) getComponents(ctx context.Context) []dCmp { var components []dCmp - components = append(components, dCmp{"logger", logPrm.Reload}) + components = append(components, dCmp{"logger", func() error { + prm, err := c.loggerPrm() + if err != nil { + return err + } + logger.UpdateLevelForTags(prm) + return nil + }}) components = append(components, dCmp{"runtime", func() error { setRuntimeParameters(ctx, c) return nil @@ -1407,6 +1389,12 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { } return err }}) + if c.treeService != nil { + components = append(components, dCmp{"tree", func() error { + c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys()) + return nil + }}) + } if cmp, updated := metricsComponent(c); updated { if cmp.enabled { cmp.preReload = enableMetricsSvc @@ -1419,17 +1407,13 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) } + components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }}) + return components } func (c *cfg) reloadPools() error { - newSize := objectconfig.Put(c.appCfg).PoolSizeLocal() - c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size") - - newSize = objectconfig.Put(c.appCfg).PoolSizeRemote() - c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size") - - newSize = replicatorconfig.PoolSize(c.appCfg) + newSize := replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") return nil @@ -1466,7 +1450,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker { func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { return container.NewInfoProvider(func() (container.Source, error) { c.initMorphComponents(ctx) - cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary()) + cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) if err != nil { return nil, err } diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go index 36e53ea7c..c40bf3620 100644 --- a/cmd/frostfs-node/config/calls.go +++ b/cmd/frostfs-node/config/calls.go @@ -1,6 +1,7 @@ package config import ( + "slices" "strings" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" @@ -52,6 +53,5 @@ func (x *Config) Value(name string) any { // It supports only one level of nesting and is intended to be used // to provide default values. 
func (x *Config) SetDefault(from *Config) { - x.defaultPath = make([]string, len(from.path)) - copy(x.defaultPath, from.path) + x.defaultPath = slices.Clone(from.path) } diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go index 35dae97d9..ee9d4268b 100644 --- a/cmd/frostfs-node/config/configdir_test.go +++ b/cmd/frostfs-node/config/configdir_test.go @@ -12,13 +12,10 @@ import ( func TestConfigDir(t *testing.T) { dir := t.TempDir() - cfgFileName0 := path.Join(dir, "cfg_00.json") - cfgFileName1 := path.Join(dir, "cfg_01.yml") + cfgFileName := path.Join(dir, "cfg_01.yml") - require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777)) - require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777)) + require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) c := New("", dir, "") require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) - require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size"))) } diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go new file mode 100644 index 000000000..1cd64a6f8 --- /dev/null +++ b/cmd/frostfs-node/config/container/container.go @@ -0,0 +1,27 @@ +package containerconfig + +import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + +const ( + subsection = "container" + listStreamSubsection = "list_stream" + + // ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once. + ContainerBatchSizeDefault = 1000 +) + +// ContainerBatchSize returns the value of "batch_size" config parameter +// from "list_stream" subsection of "container" section. +// +// Returns ContainerBatchSizeDefault if the value is missing or if +// the value is not a positive integer.
+func ContainerBatchSize(c *config.Config) uint32 { + if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil { + return ContainerBatchSizeDefault + } + size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size") + if size == 0 { + return ContainerBatchSizeDefault + } + return size +} diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go new file mode 100644 index 000000000..744cd3295 --- /dev/null +++ b/cmd/frostfs-node/config/container/container_test.go @@ -0,0 +1,27 @@ +package containerconfig_test + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestContainerSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty)) + }) + + const path = "../../../../config/example/node" + fileConfigTest := func(c *config.Config) { + require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go index c944d1c58..7994e7809 100644 --- a/cmd/frostfs-node/config/engine/config.go +++ b/cmd/frostfs-node/config/engine/config.go @@ -11,10 +11,6 @@ import ( const ( subsection = "storage" - - // ShardPoolSizeDefault is a default value of routine pool size per-shard to - // process object PUT operations in a storage engine. - ShardPoolSizeDefault = 20 ) // ErrNoShardConfigured is returned when at least 1 shard is required but none are found. @@ -41,6 +37,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) c.Sub(si), ) + if sc.Mode() == mode.Disabled { + continue + } + // Path for the blobstor can't be present in the default section, because different shards // must have different paths, so if it is missing, the shard is not here. // At the same time checking for "blobstor" section doesn't work proper @@ -50,10 +50,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) } (*config.Config)(sc).SetDefault(def) - if sc.Mode() == mode.Disabled { - continue - } - if err := f(sc); err != nil { return err } @@ -65,18 +61,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) return nil } -// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section. -// -// Returns ShardPoolSizeDefault if the value is not a positive number. -func ShardPoolSize(c *config.Config) uint32 { - v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size") - if v > 0 { - return v - } - - return ShardPoolSizeDefault -} - // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // // Returns 0 if the value is missing.
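ContainerBatchSize distinguishes a missing key (Value(...) == nil) from one that is present but unusable, and both cases fall back to the documented default. The same lookup discipline, sketched against a plain map standing in for the config tree:

```go
package main

import "fmt"

const batchSizeDefault = 1000

// batchSize treats an absent key and a non-positive value the same way:
// both yield the documented default.
func batchSize(cfg map[string]any) uint32 {
	v, ok := cfg["container.list_stream.batch_size"]
	if !ok {
		return batchSizeDefault // key absent
	}
	n, ok := v.(int)
	if !ok || n <= 0 {
		return batchSizeDefault // present but unusable
	}
	return uint32(n)
}

func main() {
	fmt.Println(batchSize(map[string]any{}))                                         // 1000
	fmt.Println(batchSize(map[string]any{"container.list_stream.batch_size": 0}))   // 1000
	fmt.Println(batchSize(map[string]any{"container.list_stream.batch_size": 500})) // 500
}
```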
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index 19ad0e7ac..401c54edc 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -14,10 +14,28 @@ import ( piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) +func TestIterateShards(t *testing.T) { + fileConfigTest := func(c *config.Config) { + var res []string + require.NoError(t, + engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { + res = append(res, sc.Metabase().Path()) + return nil + })) + require.Equal(t, []string{"abc", "xyz"}, res) + } + + const cfgDir = "./testdata/shards" + configtest.ForEachFileType(cfgDir, fileConfigTest) + configtest.ForEnvFileType(t, cfgDir, fileConfigTest) +} + func TestEngineSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() @@ -37,7 +55,6 @@ func TestEngineSection(t *testing.T) { require.False(t, handlerCalled) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) - require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty)) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) }) @@ -47,7 +64,6 @@ func TestEngineSection(t *testing.T) { num := 0 require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) - require.EqualValues(t, 15, engineconfig.ShardPoolSize(c)) err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { defer func() { @@ -60,6 +76,7 @@ func TestEngineSection(t *testing.T) { ss := blob.Storages() pl := sc.Pilorama() gc := sc.GC() + limits := sc.Limits() switch num { case 0: @@ -84,10 +101,11 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, true, sc.Compress()) - require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) - require.Equal(t, true, sc.EstimateCompressibility()) - require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) + require.Equal(t, true, sc.Compression().Enabled) + require.Equal(t, compression.LevelFastest, sc.Compression().Level) + require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) + require.Equal(t, true, sc.Compression().EstimateCompressibility) + require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -118,6 +136,86 @@ func TestEngineSection(t *testing.T) { require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, 30*time.Second, readLimits.IdleTimeout) + require.Equal(t, int64(10_000), readLimits.MaxRunningOps) + require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) + 
require.Equal(t, 45*time.Second, writeLimits.IdleTimeout) + require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) + require.Equal(t, int64(100), writeLimits.MaxWaitingOps) + require.ElementsMatch(t, readLimits.Tags, + []qos.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(20), + ReservedOps: toPtr(1000), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(70), + ReservedOps: toPtr(10000), + }, + { + Tag: "background", + Weight: toPtr(5), + LimitOps: toPtr(10000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(5), + LimitOps: toPtr(25000), + }, + { + Tag: "policer", + Weight: toPtr(5), + LimitOps: toPtr(25000), + Prohibited: true, + }, + { + Tag: "treesync", + Weight: toPtr(5), + LimitOps: toPtr(25), + }, + }) + require.ElementsMatch(t, writeLimits.Tags, + []qos.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(200), + ReservedOps: toPtr(100), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(700), + ReservedOps: toPtr(1000), + }, + { + Tag: "background", + Weight: toPtr(50), + LimitOps: toPtr(1000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "policer", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "treesync", + Weight: toPtr(50), + LimitOps: toPtr(100), + }, + }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, fs.FileMode(0o644), pl.Perm()) @@ -140,8 +238,9 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, false, sc.Compress()) - require.Equal(t, []string(nil), sc.UncompressableContentTypes()) + require.Equal(t, false, sc.Compression().Enabled) + require.Equal(t, compression.LevelDefault, sc.Compression().Level) + require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -172,6 +271,17 @@ func TestEngineSection(t *testing.T) { require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps) + require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps) + require.Equal(t, 0, len(readLimits.Tags)) + require.Equal(t, 0, len(writeLimits.Tags)) } return nil }) @@ -185,3 +295,7 @@ func TestEngineSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) } + +func toPtr(v float64) *float64 { + return &v +} diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go index a51308b5b..b564d36f8 100644 --- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go +++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go @@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode { // Returns 0 if the value is not a positive number. 
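The boltdb accessors that follow replace their if-negative-then-zero clamps with Go's built-in max, available since Go 1.21. The equivalence, shown standalone:

```go
package main

import (
	"fmt"
	"time"
)

// before: an explicit clamp of a possibly-negative configured duration.
func maxBatchDelayOld(d time.Duration) time.Duration {
	if d < 0 {
		d = 0
	}
	return d
}

// after: the built-in max expresses the same floor in one line.
func maxBatchDelayNew(d time.Duration) time.Duration {
	return max(d, 0)
}

func main() {
	fmt.Println(maxBatchDelayOld(-time.Second), maxBatchDelayNew(-time.Second)) // 0s 0s
	fmt.Println(maxBatchDelayNew(5 * time.Millisecond))                         // 5ms
}
```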
func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d < 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } // NoSync returns the value of "no_sync" config parameter. @@ -66,8 +60,5 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) PageSize() int { s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index 0620c9f63..d42646da7 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -4,9 +4,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" + limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -26,42 +28,27 @@ func From(c *config.Config) *Config { return (*Config)(c) } -// Compress returns the value of "compress" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) Compress() bool { - return config.BoolSafe( - (*config.Config)(x), - "compress", - ) -} - -// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter. -// -// Returns nil if a the value is missing or is invalid. -func (x *Config) UncompressableContentTypes() []string { - return config.StringSliceSafe( - (*config.Config)(x), - "compression_exclude_content_types") -} - -// EstimateCompressibility returns the value of "estimate_compressibility" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) EstimateCompressibility() bool { - return config.BoolSafe( - (*config.Config)(x), - "compression_estimate_compressibility", - ) +func (x *Config) Compression() compression.Config { + cc := (*config.Config)(x).Sub("compression") + if cc == nil { + return compression.Config{} + } + return compression.Config{ + Enabled: config.BoolSafe(cc, "enabled"), + UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), + Level: compression.Level(config.StringSafe(cc, "level")), + EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), + EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), + } } // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. 
// // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0]. -func (x *Config) EstimateCompressibilityThreshold() float64 { +func estimateCompressibilityThreshold(c *config.Config) float64 { v := config.FloatOrDefault( - (*config.Config)(x), - "compression_estimate_compressibility_threshold", + c, + "estimate_compressibility_threshold", EstimateCompressibilityThresholdDefault) if v < 0.0 || v > 1.0 { return EstimateCompressibilityThresholdDefault @@ -125,6 +112,14 @@ func (x *Config) GC() *gcconfig.Config { ) } +// Limits returns "limits" subsection as a limitsconfig.Config. +func (x *Config) Limits() *limitsconfig.Config { + return limitsconfig.From( + (*config.Config)(x). + Sub("limits"), + ) +} + // RefillMetabase returns the value of "resync_metabase" config parameter. // // Returns false if the value is not a valid bool. diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go new file mode 100644 index 000000000..ccd1e0000 --- /dev/null +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -0,0 +1,112 @@ +package limits + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "github.com/spf13/cast" +) + +// From wraps config section into Config. +func From(c *config.Config) *Config { + return (*Config)(c) +} + +// Config is a wrapper over the config section +// which provides access to Shard's limits configurations. +type Config config.Config + +func (x *Config) ToConfig() qos.LimiterConfig { + result := qos.LimiterConfig{ + Read: x.read(), + Write: x.write(), + } + panicOnErr(result.Validate()) + return result +} + +func (x *Config) read() qos.OpConfig { + return x.parse("read") +} + +func (x *Config) write() qos.OpConfig { + return x.parse("write") +} + +func (x *Config) parse(sub string) qos.OpConfig { + c := (*config.Config)(x).Sub(sub) + var result qos.OpConfig + + if s := config.Int(c, "max_waiting_ops"); s > 0 { + result.MaxWaitingOps = s + } else { + result.MaxWaitingOps = qos.NoLimit + } + + if s := config.Int(c, "max_running_ops"); s > 0 { + result.MaxRunningOps = s + } else { + result.MaxRunningOps = qos.NoLimit + } + + if s := config.DurationSafe(c, "idle_timeout"); s > 0 { + result.IdleTimeout = s + } else { + result.IdleTimeout = qos.DefaultIdleTimeout + } + + result.Tags = tags(c) + + return result +} + +func tags(c *config.Config) []qos.IOTagConfig { + c = c.Sub("tags") + var result []qos.IOTagConfig + for i := 0; ; i++ { + tag := config.String(c, strconv.Itoa(i)+".tag") + if tag == "" { + return result + } + + var tagConfig qos.IOTagConfig + tagConfig.Tag = tag + + v := c.Value(strconv.Itoa(i) + ".weight") + if v != nil { + w, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.Weight = &w + } + + v = c.Value(strconv.Itoa(i) + ".limit_ops") + if v != nil { + l, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.LimitOps = &l + } + + v = c.Value(strconv.Itoa(i) + ".reserved_ops") + if v != nil { + r, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.ReservedOps = &r + } + + v = c.Value(strconv.Itoa(i) + ".prohibited") + if v != nil { + r, err := cast.ToBoolE(v) + panicOnErr(err) + tagConfig.Prohibited = r + } + + result = append(result, tagConfig) + } +} + +func panicOnErr(err error) { + if err != nil { + panic(err) + } +} diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go 
b/cmd/frostfs-node/config/engine/shard/pilorama/config.go index 28671ca55..5d4e8f408 100644 --- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go +++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go @@ -52,10 +52,7 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d <= 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s <= 0 { - s = 0 - } - return s + return max(s, 0) } diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env new file mode 100644 index 000000000..079789b0f --- /dev/null +++ b/cmd/frostfs-node/config/engine/testdata/shards.env @@ -0,0 +1,3 @@ +FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc +FROSTFS_STORAGE_SHARD_1_MODE=disabled +FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json new file mode 100644 index 000000000..b3d6abe85 --- /dev/null +++ b/cmd/frostfs-node/config/engine/testdata/shards.json @@ -0,0 +1,13 @@ +{ + "storage.shard": { + "0": { + "metabase.path": "abc" + }, + "1": { + "mode": "disabled" + }, + "2": { + "metabase.path": "xyz" + } + } +} diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml new file mode 100644 index 000000000..bbbba3af8 --- /dev/null +++ b/cmd/frostfs-node/config/engine/testdata/shards.yaml @@ -0,0 +1,7 @@ +storage.shard: + 0: + metabase.path: abc + 1: + mode: disabled + 2: + metabase.path: xyz diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go index ba9eeea2b..20f373184 100644 --- a/cmd/frostfs-node/config/logger/config.go +++ b/cmd/frostfs-node/config/logger/config.go @@ -2,6 +2,7 @@ package loggerconfig import ( "os" + "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -60,6 +61,21 @@ func Timestamp(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "timestamp") } +// Tags returns the value of "tags" config parameter from "logger" section. +func Tags(c *config.Config) [][]string { + var res [][]string + sub := c.Sub(subsection).Sub("tags") + for i := 0; ; i++ { + s := sub.Sub(strconv.FormatInt(int64(i), 10)) + names := config.StringSafe(s, "names") + if names == "" { + break + } + res = append(res, []string{names, config.StringSafe(s, "level")}) + } + return res +} + // ToLokiConfig extracts loki config. 
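Note on the logger change above: Tags walks numbered subsections under logger.tags ("0", "1", ...) and stops at the first entry whose names value is empty, returning {names, level} string pairs. A hedged consumption sketch (appCfg and the printing are illustrative):

```go
// Each pair couples a comma-separated component list with a level,
// e.g. {"main, morph", "debug"} in the test fixture below.
for _, tag := range loggerconfig.Tags(appCfg) {
	names, level := tag[0], tag[1]
	fmt.Printf("log components %q at level %q\n", names, level)
}
```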
func ToLokiConfig(c *config.Config) loki.Config {
	hostname, _ := os.Hostname()
diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go
index ffe8ac693..796ad529e 100644
--- a/cmd/frostfs-node/config/logger/config_test.go
+++ b/cmd/frostfs-node/config/logger/config_test.go
@@ -22,6 +22,9 @@ func TestLoggerSection_Level(t *testing.T) {
 		require.Equal(t, "debug", loggerconfig.Level(c))
 		require.Equal(t, "journald", loggerconfig.Destination(c))
 		require.Equal(t, true, loggerconfig.Timestamp(c))
+		tags := loggerconfig.Tags(c)
+		require.Equal(t, "main, morph", tags[0][0])
+		require.Equal(t, "debug", tags[0][1])
 	}
 
 	configtest.ForEachFileType(path, fileConfigTest)
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
index d089870ea..a9f774d18 100644
--- a/cmd/frostfs-node/config/morph/config.go
+++ b/cmd/frostfs-node/config/morph/config.go
@@ -33,6 +33,9 @@ const (
 	// ContainerCacheSizeDefault represents the default size for the container cache.
 	ContainerCacheSizeDefault = 100
+
+	// PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
+	PollCandidatesTimeoutDefault = 20 * time.Second
 )
 
 var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@@ -154,3 +157,17 @@ func FrostfsIDCacheSize(c *config.Config) uint32 {
 	}
 	return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
 }
+
+// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
+// from "morph" section.
+//
+// Returns PollCandidatesTimeoutDefault if the value is not a positive duration.
+func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
+	v := config.DurationSafe(c.Sub(subsection).
+		Sub("netmap").Sub("candidates"), "poll_interval")
+	if v > 0 {
+		return v
+	}
+
+	return PollCandidatesTimeoutDefault
+}
diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
index 4d063245b..c50718c5f 100644
--- a/cmd/frostfs-node/config/node/config.go
+++ b/cmd/frostfs-node/config/node/config.go
@@ -3,7 +3,9 @@ package nodeconfig
 import (
 	"fmt"
 	"io/fs"
+	"iter"
 	"os"
+	"slices"
 	"strconv"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
@@ -88,12 +90,8 @@ func Wallet(c *config.Config) *keys.PrivateKey {
 
 type stringAddressGroup []string
 
-func (x stringAddressGroup) IterateAddresses(f func(string) bool) {
-	for i := range x {
-		if f(x[i]) {
-			break
-		}
-	}
+func (x stringAddressGroup) Addresses() iter.Seq[string] {
+	return slices.Values(x)
 }
 
 func (x stringAddressGroup) NumberOfAddresses() int {
@@ -133,14 +131,6 @@ func Attributes(c *config.Config) (attrs []string) {
 	return
 }
 
-// Relay returns the value of "relay" config parameter
-// from "node" section.
-//
-// Returns false if the value is not set.
-func Relay(c *config.Config) bool {
-	return config.BoolSafe(c.Sub(subsection), "relay")
-}
-
 // PersistentSessions returns structure that provides access to "persistent_sessions"
 // subsection of "node" section.
 func PersistentSessions(c *config.Config) PersistentSessionsConfig {
@@ -198,7 +188,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
 //
 // Returns PermDefault if the value is not a positive number.
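Note on the stringAddressGroup rewrite above: the callback-based IterateAddresses is replaced by a Go 1.23 range-over-func iterator backed by slices.Values. Callers now range directly, and break or return replaces the old "return true from the callback to stop" contract. A sketch with an illustrative predicate:

```go
// group is a stringAddressGroup; the grpcs:// predicate is hypothetical.
for addr := range group.Addresses() {
	if strings.HasPrefix(addr, "grpcs://") {
		fmt.Println("first TLS endpoint:", addr)
		break // early exit, formerly signalled by f(addr) == true
	}
}
```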
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
-	p := config.UintSafe((*config.Config)(l.cfg), "perm")
+	p := config.UintSafe(l.cfg, "perm")
 	if p == 0 {
 		p = PermDefault
 	}
@@ -210,10 +200,15 @@
 //
 // Returns false if the value is not a boolean.
 func (l PersistentPolicyRulesConfig) NoSync() bool {
-	return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
+	return config.BoolSafe(l.cfg, "no_sync")
 }
 
 // CompatibilityMode returns true if need to run node in compatibility with previous versions mode.
 func CompatibilityMode(c *config.Config) bool {
 	return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
 }
+
+// LocodeDBPath returns the path to the LOCODE database.
+func LocodeDBPath(c *config.Config) string {
+	return config.String(c.Sub(subsection), "locode_db_path")
+}
diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go
index 7b9adecf4..9af1dc038 100644
--- a/cmd/frostfs-node/config/node/config_test.go
+++ b/cmd/frostfs-node/config/node/config_test.go
@@ -29,12 +29,10 @@ func TestNodeSection(t *testing.T) {
 	)
 	attribute := Attributes(empty)
-	relay := Relay(empty)
 	persisessionsPath := PersistentSessions(empty).Path()
 	persistatePath := PersistentState(empty).Path()
 
 	require.Empty(t, attribute)
-	require.Equal(t, false, relay)
 	require.Equal(t, "", persisessionsPath)
 	require.Equal(t, PersistentStatePathDefault, persistatePath)
 })
@@ -45,7 +43,6 @@
 	key := Key(c)
 	addrs := BootstrapAddresses(c)
 	attributes := Attributes(c)
-	relay := Relay(c)
 	wKey := Wallet(c)
 	persisessionsPath := PersistentSessions(c).Path()
 	persistatePath := PersistentState(c).Path()
@@ -87,8 +84,6 @@
 		return false
 	})
 
-	require.Equal(t, true, relay)
-
 	require.Len(t, attributes, 2)
 	require.Equal(t, "Price:11", attributes[0])
 	require.Equal(t, "UN-LOCODE:RU MSK", attributes[1])
diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go
index 6ff1fe2ab..c8c967d30 100644
--- a/cmd/frostfs-node/config/object/config.go
+++ b/cmd/frostfs-node/config/object/config.go
@@ -21,10 +21,6 @@ const (
 	putSubsection = "put"
 	getSubsection = "get"
-
-	// PutPoolSizeDefault is a default value of routine pool size to
-	// process object.Put requests in object service.
-	PutPoolSizeDefault = 10
 )
 
 // Put returns structure that provides access to "put" subsection of
@@ -35,30 +31,6 @@ func Put(c *config.Config) PutConfig {
 	}
 }
 
-// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
-//
-// Returns PutPoolSizeDefault if the value is not a positive number.
-func (g PutConfig) PoolSizeRemote() int {
-	v := config.Int(g.cfg, "remote_pool_size")
-	if v > 0 {
-		return int(v)
-	}
-
-	return PutPoolSizeDefault
-}
-
-// PoolSizeLocal returns the value of "local_pool_size" config parameter.
-//
-// Returns PutPoolSizeDefault if the value is not a positive number.
-func (g PutConfig) PoolSizeLocal() int {
-	v := config.Int(g.cfg, "local_pool_size")
-	if v > 0 {
-		return int(v)
-	}
-
-	return PutPoolSizeDefault
-}
-
 // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
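Note: with the object.put remote_pool_size/local_pool_size knobs deleted above, the replicator section later in this diff carries the one remaining put-pool default. A hedged usage sketch; the connection between the two hunks is a reading of this diff, not stated in it:

```go
// Resolves to 10 (PoolSizeDefault) unless replicator.pool_size is set
// to a positive integer.
size := replicatorconfig.PoolSize(appCfg) // appCfg is illustrative
_ = size
```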
func (g PutConfig) SkipSessionTokenIssuerVerification() bool { return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification") diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go index e2bb105d9..1c525ef55 100644 --- a/cmd/frostfs-node/config/object/config_test.go +++ b/cmd/frostfs-node/config/object/config_test.go @@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote()) - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal()) require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty)) require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification()) }) @@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) { const path = "../../../../config/example/node" fileConfigTest := func(c *config.Config) { - require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote()) - require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal()) require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c)) require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification()) } diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go new file mode 100644 index 000000000..85f8180ed --- /dev/null +++ b/cmd/frostfs-node/config/qos/config.go @@ -0,0 +1,46 @@ +package qos + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +const ( + subsection = "qos" + criticalSubSection = "critical" + internalSubSection = "internal" +) + +// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. +func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys { + return authorizedKeys(c, criticalSubSection) +} + +// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. 
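Note: both qos accessors funnel into authorizedKeys (next hunk), which decodes each configured hex key and panics on the first invalid entry, so a misconfigured key list fails at startup instead of silently shrinking. Usage sketch (the qosconfig alias is illustrative; the package itself is named qos):

```go
critical := qosconfig.CriticalAuthorizedKeys(appCfg) // qos.critical.authorized_keys
internal := qosconfig.InternalAuthorizedKeys(appCfg) // qos.internal.authorized_keys
fmt.Println(len(critical), len(internal))            // empty slices when unset
```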
+func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
+	return authorizedKeys(c, internalSubSection)
+}
+
+func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
+	strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
+	pubs := make(keys.PublicKeys, 0, len(strKeys))
+
+	for i := range strKeys {
+		pub, err := keys.NewPublicKeyFromString(strKeys[i])
+		if err != nil {
+			panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
+		}
+
+		pubs = append(pubs, pub)
+	}
+
+	return pubs
+}
diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go
new file mode 100644
index 000000000..b3b6019cc
--- /dev/null
+++ b/cmd/frostfs-node/config/qos/config_test.go
@@ -0,0 +1,40 @@
+package qos
+
+import (
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/stretchr/testify/require"
+)
+
+func TestQoSSection(t *testing.T) {
+	t.Run("defaults", func(t *testing.T) {
+		empty := configtest.EmptyConfig()
+
+		require.Empty(t, CriticalAuthorizedKeys(empty))
+		require.Empty(t, InternalAuthorizedKeys(empty))
+	})
+
+	const path = "../../../../config/example/node"
+
+	criticalPubs := make(keys.PublicKeys, 2)
+	criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
+	criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
+
+	internalPubs := make(keys.PublicKeys, 2)
+	internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
+	internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
+
+	fileConfigTest := func(c *config.Config) {
+		require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
+		require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
+	}
+
+	configtest.ForEachFileType(path, fileConfigTest)
+
+	t.Run("ENV", func(t *testing.T) {
+		configtest.ForEnvFileType(t, path, fileConfigTest)
+	})
+}
diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go
index 0fbac935c..e954bf19d 100644
--- a/cmd/frostfs-node/config/replicator/config.go
+++ b/cmd/frostfs-node/config/replicator/config.go
@@ -11,6 +11,8 @@ const (
 	// PutTimeoutDefault is a default timeout of object put request in replicator.
 	PutTimeoutDefault = 5 * time.Second
+	// PoolSizeDefault is a default pool size for put requests in the replicator.
+	PoolSizeDefault = 10
 )
 
 // PutTimeout returns the value of "put_timeout" config parameter
@@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration {
 // PoolSize returns the value of "pool_size" config parameter
 // from "replicator" section.
+//
+// Returns PoolSizeDefault if the value is a non-positive integer.
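Note: the PoolSize accessor that follows relies on a convention used throughout these config packages: IntSafe yields 0 for a missing or unparsable value, so a single v > 0 check folds "unset", zero, negative, and garbage into the default. Condensed sketch of the equivalent behavior:

```go
// "replicator" stands in for the package's subsection constant.
if v := int(config.IntSafe(c.Sub("replicator"), "pool_size")); v > 0 {
	return v
}
return PoolSizeDefault // 10
```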
func PoolSize(c *config.Config) int { - return int(config.IntSafe(c.Sub(subsection), "pool_size")) + v := int(config.IntSafe(c.Sub(subsection), "pool_size")) + if v > 0 { + return v + } + + return PoolSizeDefault } diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go index 2129c01b4..2aa490946 100644 --- a/cmd/frostfs-node/config/replicator/config_test.go +++ b/cmd/frostfs-node/config/replicator/config_test.go @@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) { empty := configtest.EmptyConfig() require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty)) - require.Equal(t, 0, replicatorconfig.PoolSize(empty)) + require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty)) }) const path = "../../../../config/example/node" diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go new file mode 100644 index 000000000..e0efdfde2 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config.go @@ -0,0 +1,42 @@ +package rpcconfig + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" +) + +const ( + subsection = "rpc" + limitsSubsection = "limits" +) + +type LimitConfig struct { + Methods []string + MaxOps int64 +} + +// Limits returns the "limits" config from "rpc" section. +func Limits(c *config.Config) []LimitConfig { + c = c.Sub(subsection).Sub(limitsSubsection) + + var limits []LimitConfig + + for i := uint64(0); ; i++ { + si := strconv.FormatUint(i, 10) + sc := c.Sub(si) + + methods := config.StringSliceSafe(sc, "methods") + if len(methods) == 0 { + break + } + + if sc.Value("max_ops") == nil { + panic("no max operations for method group") + } + + limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) + } + + return limits +} diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go new file mode 100644 index 000000000..a6365e19f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config_test.go @@ -0,0 +1,77 @@ +package rpcconfig + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestRPCSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + require.Empty(t, Limits(configtest.EmptyConfig())) + }) + + t.Run("correct config", func(t *testing.T) { + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(1000)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + t.Run("no max operations", func(t *testing.T) { + const path = "testdata/no_max_ops" + + fileConfigTest := func(c *config.Config) { + require.Panics(t, func() { _ = Limits(c) }) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + 
t.Run("zero max operations", func(t *testing.T) { + const path = "testdata/zero_max_ops" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(0)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) +} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env new file mode 100644 index 000000000..2fed4c5bc --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env @@ -0,0 +1,3 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json new file mode 100644 index 000000000..6156aa71d --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json @@ -0,0 +1,18 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ] + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml new file mode 100644 index 000000000..e50b7ae93 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml @@ -0,0 +1,8 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env new file mode 100644 index 000000000..ce7302b0b --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env @@ -0,0 +1,4 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=0 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json new file mode 100644 index 000000000..16a1c173f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json @@ -0,0 +1,19 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 0 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml new file mode 100644 index 000000000..525d768d4 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml @@ -0,0 +1,9 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 0 + - 
methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index d3e1b2766..bdb280d87 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -5,6 +5,7 @@ import ( "context" "net" + containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" @@ -28,10 +29,10 @@ import ( func initContainerService(_ context.Context, c *cfg) { // container wrapper that tries to invoke notary // requests if chain is configured so - wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary()) + wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) - c.shared.cnrClient = wrap + c.cnrClient = wrap cnrSrc := cntClient.AsContainerSource(wrap) @@ -42,11 +43,12 @@ func initContainerService(_ context.Context, c *cfg) { fatalOnErr(err) cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg) - if cacheSize > 0 { + if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } - c.shared.frostfsidClient = frostfsIDSubjectProvider + c.frostfsidClient = frostfsIDSubjectProvider + c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), @@ -55,8 +57,10 @@ func initContainerService(_ context.Context, c *cfg) { service := containerService.NewSignService( &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, - containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc), + newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, + containerService.NewSplitterService( + c.cfgContainer.containerBatchSize, c.respSvc, + containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), ), ) service = containerService.NewAuditService(service, c.log, c.audit) @@ -96,7 +100,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c // TODO: use owner directly from the event after neofs-contract#256 will become resolved // but don't forget about the profit of reading the new container and caching it: // creation success are most commonly tracked by polling GET op. 
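Stepping back to initContainerService above: the read path is now a chain of decorators, with the new splitter wedged between the APE checks and execution, and audit wrapping the signed service. The assembly order, sketched with constructors from this diff and arguments abbreviated:

```go
// Request flow: grpc -> audit -> sign -> APE -> split -> execute.
// NewSplitterService caps replies at containerBatchSize (assumption:
// that is what ContainerBatchSize configures, per the containerconfig
// import added above).
svc := containerService.NewSignService(&key.PrivateKey,
	containerService.NewAPEServer(defaultChainRouter, cnrRdr,
		irFetcher, netMapSource, frostfsidClient,
		containerService.NewSplitterService(containerBatchSize, respSvc,
			containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), respSvc))))
svc = containerService.NewAuditService(svc, log, audit)
```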
- cnr, err := cnrSrc.Get(ev.ID) + cnr, err := cnrSrc.Get(ctx, ev.ID) if err == nil { containerCache.containerCache.set(ev.ID, cnr, nil) } else { @@ -217,20 +221,25 @@ type morphContainerReader struct { src containerCore.Source lister interface { - ContainersOf(*user.ID) ([]cid.ID, error) + ContainersOf(context.Context, *user.ID) ([]cid.ID, error) + IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error } } -func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) { - return x.src.Get(id) +func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { + return x.src.Get(ctx, id) } -func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) { - return x.src.DeletionInfo(id) +func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { + return x.src.DeletionInfo(ctx, id) } -func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { - return x.lister.ContainersOf(id) +func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { + return x.lister.ContainersOf(ctx, id) +} + +func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error { + return x.lister.IterateContainersOf(ctx, id, processCID) } type morphContainerWriter struct { diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index ecd82bba5..1825013c7 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -7,9 +7,12 @@ import ( controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" + metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" + tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -50,7 +53,14 @@ func initControlService(ctx context.Context, c *cfg) { return } - c.cfgControlService.server = grpc.NewServer() + c.cfgControlService.server = grpc.NewServer( + grpc.ChainUnaryInterceptor( + qos.NewSetCriticalIOTagUnaryServerInterceptor(), + metrics.NewUnaryServerInterceptor(), + tracing.NewUnaryServerInterceptor(), + ), + // control service has no stream methods, so no stream interceptors added + ) c.onShutdown(func() { stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go index 3cca09105..d2d4e9785 100644 --- a/cmd/frostfs-node/frostfsid.go +++ b/cmd/frostfs-node/frostfsid.go @@ -1,6 +1,7 @@ package main import ( + "context" "strings" "time" @@ -42,7 +43,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int } } -func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) { +func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { hit := false startedAt := time.Now() defer func() { @@ -55,7 +56,7 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er return result.subject, result.err } - subj, err := m.subjProvider.GetSubject(addr) + subj, err := 
m.subjProvider.GetSubject(ctx, addr) if err != nil { if m.isCacheableError(err) { m.subjCache.Add(addr, subjectWithError{ @@ -69,7 +70,7 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er return subj, nil } -func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { +func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { hit := false startedAt := time.Now() defer func() { @@ -82,7 +83,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.Sub return result.subject, result.err } - subjExt, err := m.subjProvider.GetSubjectExtended(addr) + subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) if err != nil { if m.isCacheableError(err) { m.subjExtCache.Add(addr, subjectExtWithError{ diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 6105be861..6b6d44750 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -4,14 +4,19 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net" "time" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" + rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -130,12 +135,16 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( + qos.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), grpc.ChainStreamInterceptor( + qos.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), } @@ -224,3 +233,54 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) } + +func initRPCLimiter(c *cfg) error { + var limits []limiting.KeyLimit + for _, l := range rpcconfig.Limits(c.appCfg) { + limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) + } + + if err := validateRPCLimits(c, limits); err != nil { + return fmt.Errorf("validate RPC limits: %w", err) + } + + limiter, err := limiting.NewSemaphoreLimiter(limits) + if err != nil { + return fmt.Errorf("create RPC limiter: %w", err) + } + + c.cfgGRPC.limiter.Store(limiter) + return nil +} + +func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error { + availableMethods := getAvailableMethods(c.cfgGRPC.servers) + for _, limit := range limits { + for _, method := range limit.Keys { + if _, ok := availableMethods[method]; !ok { + return fmt.Errorf("set limit on an unknown method %q", method) + } + } + } + return nil +} + +func 
getAvailableMethods(servers []grpcServer) map[string]struct{} { + res := make(map[string]struct{}) + for _, server := range servers { + for _, method := range getMethodsForServer(server.Server) { + res[method] = struct{}{} + } + } + return res +} + +func getMethodsForServer(server *grpc.Server) []string { + var res []string + for service, info := range server.GetServiceInfo() { + for _, method := range info.Methods { + res = append(res, fmt.Sprintf("/%s/%s", service, method.Name)) + } + } + return res +} diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index f8854ab3c..0228d2a10 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -101,6 +101,7 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) }) initAccessPolicyEngine(ctx, c) initAndLog(ctx, c, "access policy engine", func(c *cfg) { @@ -116,6 +117,8 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "apemanager", initAPEManagerService) initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) + initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) }) + initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } @@ -134,7 +137,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C err := stopper(ctx) if err != nil { c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name), - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index 19b4af51f..d9ca01e70 100644 --- a/cmd/frostfs-node/metrics.go +++ b/cmd/frostfs-node/metrics.go @@ -8,38 +8,38 @@ import ( func metricsComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.metrics == nil { - c.dynamicConfiguration.metrics = new(httpComponent) - c.dynamicConfiguration.metrics.cfg = c - c.dynamicConfiguration.metrics.name = "metrics" - c.dynamicConfiguration.metrics.handler = metrics.Handler() + if c.metrics == nil { + c.metrics = new(httpComponent) + c.metrics.cfg = c + c.metrics.name = "metrics" + c.metrics.handler = metrics.Handler() updated = true } // (re)init read configuration enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.metrics.enabled { - c.dynamicConfiguration.metrics.enabled = enabled + if enabled != c.metrics.enabled { + c.metrics.enabled = enabled updated = true } address := metricsconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.metrics.address { - c.dynamicConfiguration.metrics.address = address + if address != c.metrics.address { + c.metrics.address = address updated = true } dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.metrics.shutdownDur { - c.dynamicConfiguration.metrics.shutdownDur = dur + if dur != c.metrics.shutdownDur { + c.metrics.shutdownDur = dur updated = true } - return c.dynamicConfiguration.metrics, updated + return c.metrics, updated } func enableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Enable() + c.metricsSvc.Enable() } func disableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Disable() + c.metricsSvc.Disable() } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 67d2d1c06..917cf6fc0 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ 
-14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -35,20 +36,16 @@ func (c *cfg) initMorphComponents(ctx context.Context) { lookupScriptHashesInNNS(c) // smart contract auto negotiation - if c.cfgMorph.notaryEnabled { - err := c.cfgMorph.client.EnableNotarySupport( - client.WithProxyContract( - c.cfgMorph.proxyScriptHash, - ), - ) - fatalOnErr(err) - } - - c.log.Info(ctx, logs.FrostFSNodeNotarySupport, - zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), + err := c.cfgMorph.client.EnableNotarySupport( + client.WithProxyContract( + c.cfgMorph.proxyScriptHash, + ), ) + fatalOnErr(err) - wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary()) + c.log.Info(ctx, logs.FrostFSNodeNotarySupport) + + wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0) fatalOnErr(err) var netmapSource netmap.Source @@ -64,10 +61,11 @@ func (c *cfg) initMorphComponents(ctx context.Context) { } if c.cfgMorph.cacheTTL < 0 { - netmapSource = wrap + netmapSource = newRawNetmapStorage(wrap) } else { // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) + netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, + morphconfig.NetmapCandidatesPollInterval(c.appCfg)) } c.netMapSource = netmapSource @@ -87,7 +85,7 @@ func initMorphClient(ctx context.Context, c *cfg) { cli, err := client.New(ctx, c.key, client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log), + client.WithLogger(c.log.WithTag(logger.TagMorph)), client.WithMetrics(c.metricsCollector.MorphClientMetrics()), client.WithEndpoints(addresses...), client.WithConnLostCallback(func() { @@ -100,7 +98,7 @@ func initMorphClient(ctx context.Context, c *cfg) { if err != nil { c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient, zap.Any("endpoints", addresses), - zap.String("error", err.Error()), + zap.Error(err), ) fatalOnErr(err) @@ -116,15 +114,9 @@ func initMorphClient(ctx context.Context, c *cfg) { } c.cfgMorph.client = cli - c.cfgMorph.notaryEnabled = cli.ProbeNotary() } func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { - // skip notary deposit in non-notary environments - if !c.cfgMorph.notaryEnabled { - return - } - tx, vub, err := makeNotaryDeposit(ctx, c) fatalOnErr(err) @@ -161,7 +153,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error } func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { - if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil { + if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil { return err } @@ -174,22 +166,23 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { err error subs subscriber.Subscriber ) + log := c.log.WithTag(logger.TagMorph) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + c.log.Warn(ctx, 
logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) } subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: c.log, + Log: log, StartFromBlock: fromSideChainBlock, Client: c.cfgMorph.client, }) fatalOnErr(err) lis, err := event.NewListener(event.ListenerParams{ - Logger: c.log, + Logger: log, Subscriber: subs, }) fatalOnErr(err) @@ -207,7 +200,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, + log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -218,11 +211,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { - c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, + log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } @@ -233,27 +226,17 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse subs map[event.Type][]event.Handler, ) { for typ, handlers := range subs { - pi := event.NotificationParserInfo{} - pi.SetType(typ) - pi.SetScriptHash(scHash) - p, ok := parsers[typ] if !ok { panic(fmt.Sprintf("missing parser for event %s", typ)) } - pi.SetParser(p) - - lis.SetNotificationParser(pi) - - for _, h := range handlers { - hi := event.NotificationHandlerInfo{} - hi.SetType(typ) - hi.SetScriptHash(scHash) - hi.SetHandler(h) - - lis.RegisterNotificationHandler(hi) - } + lis.RegisterNotificationHandler(event.NotificationHandlerInfo{ + Contract: scHash, + Type: typ, + Parser: p, + Handlers: handlers, + }) } } @@ -282,10 +265,6 @@ func lookupScriptHashesInNNS(c *cfg) { ) for _, t := range targets { - if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled { - continue // ignore proxy contract if notary disabled - } - if emptyHash.Equals(*t.h) { *t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName) fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 9127d1123..7dfb4fe12 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,6 +8,7 @@ import ( "net" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -86,7 +87,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { } } - s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt)) + s.setControlNetmapStatus(ctrlNetSt) } // sets the current node state to the given value. 
Subsequent cfg.bootstrap @@ -104,9 +105,7 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { v := s.nodeInfo.Load() if v != nil { res, ok = v.(netmapSDK.NodeInfo) - if !ok { - panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v)) - } + assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) } return @@ -124,7 +123,11 @@ func nodeKeyFromNetmap(c *cfg) []byte { func (c *cfg) iterateNetworkAddresses(f func(string) bool) { ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - ni.IterateNetworkEndpoints(f) + for s := range ni.NetworkEndpoints() { + if f(s) { + return + } + } } } @@ -184,7 +187,7 @@ func addNewEpochNotificationHandlers(c *cfg) { c.updateContractNodeInfo(ctx, e) - if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 + if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } @@ -193,29 +196,25 @@ func addNewEpochNotificationHandlers(c *cfg) { } }) - if c.cfgMorph.notaryEnabled { - addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { - _, _, err := makeNotaryDeposit(ctx, c) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, - zap.String("error", err.Error()), - ) - } - }) - } + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { + _, _, err := makeNotaryDeposit(ctx, c) + if err != nil { + c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, + zap.Error(err), + ) + } + }) } // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. func bootstrapNode(ctx context.Context, c *cfg) { - if c.needBootstrap() { - if c.IsMaintenance() { - c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return - } - err := c.bootstrap(ctx) - fatalOnErrDetails("bootstrap error", err) + if c.IsMaintenance() { + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + return } + err := c.bootstrap(ctx) + fatalOnErrDetails("bootstrap error", err) } func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { @@ -241,7 +240,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser // initNetmapState inits current Network map state. // Must be called after Morph components initialization. 
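A cross-cutting change visible in the netmap hunks here and below: morph read paths now thread a context.Context (Epoch, GetCandidates, GetNetMapByEpoch, ReadNetworkConfiguration), so chain RPCs inherit cancellation and tracing from the caller. The call-site pattern, sketched:

```go
// Before: epoch, err := c.cfgNetmap.wrapper.Epoch()
epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
if err != nil {
	return fmt.Errorf("get current epoch: %w", err)
}
_ = epoch
```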
func initNetmapState(ctx context.Context, c *cfg) { - epoch, err := c.cfgNetmap.wrapper.Epoch() + epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) fatalOnErrDetails("could not initialize current epoch number", err) var ni *netmapSDK.NodeInfo @@ -280,7 +279,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string { } func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { - nmNodes, err := c.cfgNetmap.wrapper.GetCandidates() + nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) if err != nil { return nil, err } @@ -293,7 +292,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm } } - node, err := c.netmapLocalNodeState(epoch) + node, err := c.netmapLocalNodeState(ctx, epoch) if err != nil { return nil, err } @@ -314,9 +313,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm return candidate, nil } -func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { +func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { // calculate current network state - nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch) + nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) if err != nil { return nil, err } @@ -351,8 +350,6 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { ) } -var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") - func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: @@ -364,10 +361,6 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro c.stopMaintenance(ctx) - if !c.needBootstrap() { - return errRelayBootstrap - } - if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) return bootstrapOnline(ctx, c) @@ -378,8 +371,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) } -func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { - epoch, err := c.netMapSource.Epoch() +func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { + epoch, err := c.netMapSource.Epoch(ctx) if err != nil { return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) } @@ -392,7 +385,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error { } func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { - netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration() + netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) if err != nil { err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) } else if !netSettings.MaintenanceModeAllowed { @@ -425,7 +418,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient. 
if err != nil { return err } - return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res) + return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash) } type netInfo struct { @@ -440,7 +433,7 @@ type netInfo struct { msPerBlockRdr func() (int64, error) } -func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { +func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { magic, err := n.magic.MagicNumber() if err != nil { return nil, err @@ -450,7 +443,7 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { ni.SetCurrentEpoch(n.netState.CurrentEpoch()) ni.SetMagicNumber(magic) - netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration() + netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) if err != nil { return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) } diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go new file mode 100644 index 000000000..e6be9cdf5 --- /dev/null +++ b/cmd/frostfs-node/netmap_source.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +type rawNetmapSource struct { + client *netmapClient.Client +} + +func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { + return &rawNetmapSource{ + client: client, + } +} + +func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMap(ctx, diff) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMapByEpoch(ctx, epoch) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { + return s.client.Epoch(ctx) +} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index c4205a620..c33c02b3f 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -13,11 +13,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" deletesvc 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" @@ -33,6 +31,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -55,11 +54,11 @@ type objectSvc struct { patch *patchsvc.Service } -func (c *cfg) MaxObjectSize() uint64 { - sz, err := c.cfgNetmap.wrapper.MaxObjectSize() +func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { + sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, - zap.String("error", err.Error()), + c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, + zap.Error(err), ) } @@ -123,8 +122,8 @@ type innerRingFetcherWithNotary struct { sidechain *morphClient.Client } -func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { - keys, err := fn.sidechain.NeoFSAlphabetList() +func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { + keys, err := fn.sidechain.NeoFSAlphabetList(ctx) if err != nil { return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) } @@ -137,24 +136,6 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { return result, nil } -type innerRingFetcherWithoutNotary struct { - nm *nmClient.Client -} - -func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) { - keys, err := f.nm.GetInnerRingList() - if err != nil { - return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err) - } - - result := make([][]byte, 0, len(keys)) - for i := range keys { - result = append(result, keys[i].Bytes()) - } - - return result, nil -} - func initObjectService(c *cfg) { keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state) @@ -187,16 +168,14 @@ func initObjectService(c *cfg) { sPatch := createPatchSvc(sGet, sPut) // build service pipeline - // grpc | audit | | signature | response | acl | ape | split + // grpc | audit | qos | | signature | response | acl | ape | split splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - apeSvc := createAPEService(c, splitSvc) - - aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) + apeSvc := createAPEService(c, &irFetcher, splitSvc) var commonSvc objectService.Common - commonSvc.Init(&c.internals, aclSvc) + commonSvc.Init(&c.internals, apeSvc) respSvc := objectService.NewResponseService( &commonSvc, @@ -208,9 +187,10 @@ func initObjectService(c *cfg) { respSvc, ) - c.shared.metricsSvc = objectService.NewMetricCollector( + c.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit) + qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) + auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { @@ -234,14 +214,12 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor 
*cache.Cl prm.MarkAsGarbage(addr) prm.WithForceRemoval() - _, err := ls.Inhume(ctx, prm) - return err + return ls.Inhume(ctx, prm) } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) - pol := policer.New( - policer.WithLogger(c.log), + policer.WithLogger(c.log.WithTag(logger.TagPolicer)), policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), policer.WithBuryFunc(buryFn), policer.WithContainerSource(c.cfgObject.cnrSource), @@ -285,10 +263,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl var inhumePrm engine.InhumePrm inhumePrm.MarkAsGarbage(addr) - _, err := ls.Inhume(ctx, inhumePrm) - if err != nil { + if err := ls.Inhume(ctx, inhumePrm); err != nil { c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, - zap.String("error", err.Error()), + zap.Error(err), ) } }), @@ -304,14 +281,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl }) } -func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { - if c.cfgMorph.client.ProbeNotary() { - return &innerRingFetcherWithNotary{ - sidechain: c.cfgMorph.client, - } - } - return &innerRingFetcherWithoutNotary{ - nm: c.cfgNetmap.wrapper, +func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { + return &innerRingFetcherWithNotary{ + sidechain: c.cfgMorph.client, } } @@ -319,7 +291,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( - replicator.WithLogger(c.log), + replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), replicator.WithPutTimeout( replicatorconfig.PutTimeout(c.appCfg), ), @@ -351,7 +323,6 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche c, c.cfgNetmap.state, irFetcher, - objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal), objectwriter.WithLogger(c.log), objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), ) @@ -377,7 +348,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav c.netMapSource, keyStorage, containerSource, - searchsvc.WithLogger(c.log), + searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), ) } @@ -403,7 +374,7 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ), coreConstructor, containerSource, - getsvc.WithLogger(c.log)) + getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) } func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -414,7 +385,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag c.netMapSource, c, c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log), + getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), ) } @@ -431,7 +402,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi cfg: c, }, keyStorage, - deletesvc.WithLogger(c.log), + deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), ) } @@ -455,28 +426,19 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi ) } -func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { - return v2.New( - apeSvc, - c.netMapSource, - irFetcher, - c.cfgObject.cnrSource, - v2.WithLogger(c.log), - ) -} - -func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { +func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc 
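// Components below derive their loggers via WithTag, so per-tag level
// overrides from the `logger.tags` config section apply per subsystem.
// A sketch, assuming the tag constants in pkg/util/logger correspond to the
// tag names accepted by the config:
//
//	polLog := c.log.WithTag(logger.TagPolicer)    // picks up a tag-level override, if any
//	repLog := c.log.WithTag(logger.TagReplicator) // falls back to the global level otherwise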
*objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.shared.frostfsidClient, + c.frostfsidClient, c.netMapSource, c.cfgNetmap.state, c.cfgObject.cnrSource, c.binPublicKey, ), + objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), splitSvc, ) } @@ -500,8 +462,7 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad prm.WithTarget(tombstone, addrs...) - _, err := e.engine.Inhume(ctx, prm) - return err + return e.engine.Inhume(ctx, prm) } func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index 5b40c8a88..e4da8119f 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) { func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.pprof == nil { - c.dynamicConfiguration.pprof = new(httpComponent) - c.dynamicConfiguration.pprof.cfg = c - c.dynamicConfiguration.pprof.name = "pprof" - c.dynamicConfiguration.pprof.handler = httputil.Handler() - c.dynamicConfiguration.pprof.preReload = tuneProfilers + if c.pprof == nil { + c.pprof = new(httpComponent) + c.pprof.cfg = c + c.pprof.name = "pprof" + c.pprof.handler = httputil.Handler() + c.pprof.preReload = tuneProfilers updated = true } // (re)init read configuration enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.pprof.enabled { - c.dynamicConfiguration.pprof.enabled = enabled + if enabled != c.pprof.enabled { + c.pprof.enabled = enabled updated = true } address := profilerconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.pprof.address { - c.dynamicConfiguration.pprof.address = address + if address != c.pprof.address { + c.pprof.address = address updated = true } dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.pprof.shutdownDur { - c.dynamicConfiguration.pprof.shutdownDur = dur + if dur != c.pprof.shutdownDur { + c.pprof.shutdownDur = dur updated = true } - return c.dynamicConfiguration.pprof, updated + return c.pprof, updated } func tuneProfilers(c *cfg) { diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go new file mode 100644 index 000000000..6394b668b --- /dev/null +++ b/cmd/frostfs-node/qos.go @@ -0,0 +1,108 @@ +package main + +import ( + "bytes" + "context" + + qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "go.uber.org/zap" +) + +type cfgQoSService struct { + netmapSource netmap.Source + logger *logger.Logger + allowedCriticalPubs [][]byte + allowedInternalPubs [][]byte +} + +func initQoSService(c *cfg) { + criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) + internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg) + rawCriticalPubs := 
make([][]byte, 0, len(criticalPubs)) + rawInternalPubs := make([][]byte, 0, len(internalPubs)) + for i := range criticalPubs { + rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes()) + } + for i := range internalPubs { + rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) + } + + c.cfgQoSService = cfgQoSService{ + netmapSource: c.netMapSource, + logger: c.log, + allowedCriticalPubs: rawCriticalPubs, + allowedInternalPubs: rawInternalPubs, + } +} + +func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { + rawTag, defined := qosTagging.IOTagFromContext(ctx) + if !defined { + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) + } + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + ioTag, err := qos.FromRawString(rawTag) + if err != nil { + s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + + switch ioTag { + case qos.IOTagClient: + return ctx + case qos.IOTagCritical: + for _, pk := range s.allowedCriticalPubs { + if bytes.Equal(pk, requestSignPublicKey) { + return ctx + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), requestSignPublicKey) { + return ctx + } + } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + case qos.IOTagInternal: + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return ctx + } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + default: + s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } +} + +func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { + for _, pk := range s.allowedInternalPubs { + if bytes.Equal(pk, publicKey) { + return true + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return false + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), publicKey) { + return true + } + } + + return false +} diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go new file mode 100644 index 000000000..971f9eebf --- /dev/null +++ b/cmd/frostfs-node/qos_test.go @@ -0,0 +1,226 @@ +package main + +import ( + "context" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestQoSService_Client(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag client defined", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := 
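// The AdjustIncomingTag flow exercised by the tests below can be summarized as
// follows (derived from the implementation above, not normative):
//
//	no tag        -> internal if signed by an allowed-internal or netmap key, else client
//	unparsable    -> client
//	client        -> passed through unchanged
//	internal      -> kept for allowed-internal or netmap keys, else downgraded to client
//	critical      -> kept for allowed-critical or netmap keys, else downgraded to client
//	anything else -> downgraded to client
//
// A hypothetical call site (the signerPublicKey variable is illustrative only):
//
//	ctx = c.cfgQoSService.AdjustIncomingTag(ctx, signerPublicKey)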
tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func TestQoSService_Internal(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx 
= s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) +} + +func TestQoSService_Critical(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) +} + +func TestQoSService_NetmapGetError(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + s.netmapSource = &utilTesting.TestNetmapSource{} + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { + nmSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + reqSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedCritSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedIntSigner, err 
:= keys.NewPrivateKey() + require.NoError(t, err) + + var node netmap.NodeInfo + node.SetPublicKey(nmSigner.PublicKey().Bytes()) + nm := &netmap.NetMap{} + nm.SetEpoch(100) + nm.SetNodes([]netmap.NodeInfo{node}) + + return &cfgQoSService{ + logger: test.NewLogger(t), + netmapSource: &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ + 100: nm, + }, + CurrentEpoch: 100, + }, + allowedCriticalPubs: [][]byte{ + allowedCritSigner.PublicKey().Bytes(), + }, + allowedInternalPubs: [][]byte{ + allowedIntSigner.PublicKey().Bytes(), + }, + }, + &testQoSServicePublicKeys{ + NetmapNode: nmSigner.PublicKey().Bytes(), + Request: reqSigner.PublicKey().Bytes(), + Internal: allowedIntSigner.PublicKey().Bytes(), + Critical: allowedCritSigner.PublicKey().Bytes(), + } +} + +type testQoSServicePublicKeys struct { + NetmapNode []byte + Request []byte + Internal []byte + Critical []byte +} diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index 2f3c9cbfe..fbfe3f5e6 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -55,7 +56,7 @@ func initSessionService(c *cfg) { server := sessionTransportGRPC.New( sessionSvc.NewSignService( &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log), + sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), ), ) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index c423c0660..62af45389 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" "google.golang.org/grpc" @@ -29,16 +30,16 @@ type cnrSource struct { cli *containerClient.Client } -func (c cnrSource) Get(id cid.ID) (*container.Container, error) { - return c.src.Get(id) +func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { + return c.src.Get(ctx, id) } -func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) { - return c.src.DeletionInfo(cid) +func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { + return c.src.DeletionInfo(ctx, cid) } -func (c cnrSource) List() ([]cid.ID, error) { - return c.cli.ContainersOf(nil) +func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { + return c.cli.ContainersOf(ctx, nil) } func initTreeService(c *cfg) { @@ -51,12 +52,12 @@ func initTreeService(c *cfg) { c.treeService = tree.New( tree.WithContainerSource(cnrSource{ src: c.cfgObject.cnrSource, - cli: c.shared.cnrClient, + cli: c.cnrClient, }), - tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), + tree.WithFrostfsidSubjectProvider(c.frostfsidClient), 
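// The container source consumed by the tree service is now fully
// context-aware; its assumed shape, based on the cnrSource adapter above:
//
//	type ContainerSource interface {
//		Get(ctx context.Context, id cid.ID) (*container.Container, error)
//		DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error)
//		List(ctx context.Context) ([]cid.ID, error)
//	}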
tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log), + tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), tree.WithContainerCacheSize(treeConfig.CacheSize()), tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), @@ -72,7 +73,7 @@ func initTreeService(c *cfg) { ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - tree.RegisterTreeServiceServer(s, c.treeService) + tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) }) c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { @@ -113,7 +114,7 @@ func initTreeService(c *cfg) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index ae52b9e4a..22d2e0aa9 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -30,6 +30,11 @@ func validateConfig(c *config.Config) error { return fmt.Errorf("invalid logger destination: %w", err) } + err = loggerPrm.SetTags(loggerconfig.Tags(c)) + if err != nil { + return fmt.Errorf("invalid list of allowed tags: %w", err) + } + // shard configuration validation shardNum := 0 diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index d9c0f167f..495365cf0 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -1,7 +1,6 @@ package main import ( - "os" "path/filepath" "testing" @@ -22,17 +21,4 @@ func TestValidate(t *testing.T) { require.NoError(t, err) }) }) - - t.Run("mainnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) - t.Run("testnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) } diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go index c5e2a3a99..d8b2e88a2 100644 --- a/cmd/internal/common/ape/flags.go +++ b/cmd/internal/common/ape/flags.go @@ -2,7 +2,6 @@ package ape const ( RuleFlag = "rule" - RuleFlagDesc = "Rule statement" PathFlag = "path" PathFlagDesc = "Path to encoded chain in JSON or binary format" TargetNameFlag = "target-name" @@ -17,3 +16,64 @@ const ( ChainNameFlagDesc = "Chain name(ingress|s3)" AllFlag = "all" ) + +const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format: + <status>[:status_detail] <action>... <condition>... <resource>... + +Status: + - allow Permits specified actions + - deny Prohibits specified actions + - deny:QuotaLimitReached Denies access due to quota limits + +Actions: + Object operations: + - Object.Put, Object.Get, etc. + - Object.* (all object operations) + Container operations: + - Container.Put, Container.Get, etc.
+ - Container.* (all container operations) + +Conditions: + ResourceCondition: + Format: ResourceCondition:"key"=value, "key"!=value + Reserved properties (use '\' before '$'): + - $Object:version + - $Object:objectID + - $Object:containerID + - $Object:ownerID + - $Object:creationEpoch + - $Object:payloadLength + - $Object:payloadHash + - $Object:objectType + - $Object:homomorphicHash + +RequestCondition: + Format: RequestCondition:"key"=value, "key"!=value + Reserved properties (use '\' before '$'): + - $Actor:publicKey + - $Actor:role + + Example: + ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others + +Resources: + For objects: + - namespace/cid/oid (specific object) + - namespace/cid/* (all objects in container) + - namespace/* (all objects in namespace) + - * (all objects) + - /* (all objects in root namespace) + - /cid/* (all objects in root container) + - /cid/oid (specific object in root container) + + For containers: + - namespace/cid (specific container) + - namespace/* (all containers in namespace) + - * (all containers) + - /cid (root container) + - /* (all root containers) + +Notes: + - Cannot mix object and container operations in one rule + - Default behavior is Any=false unless 'any' is specified + - Use 'all' keyword to explicitly set Any=false` diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index b8acf0143..13f447af4 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -51,8 +51,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { } cmd.PrintErrln(err) - if cmd.PersistentPostRun != nil { - cmd.PersistentPostRun(cmd, nil) + for p := cmd; p != nil; p = p.Parent() { + if p.PersistentPostRun != nil { + p.PersistentPostRun(cmd, nil) + if !cobra.EnableTraverseRunHooks { + break + } + } } os.Exit(code) } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index f550552d2..5dd1a060e 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -27,15 +27,15 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState) - netmap.IterateNetworkEndpoints(node, func(endpoint string) { + for endpoint := range node.NetworkEndpoints() { cmd.Printf("%s ", endpoint) - }) + } cmd.Println() if !short { - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { cmd.Printf("%s\t%s: %s\n", indent, key, value) - }) + } } } diff --git a/config/example/ir.env b/config/example/ir.env index ebd91c243..c13044a6e 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -1,5 +1,7 @@ FROSTFS_IR_LOGGER_LEVEL=info FROSTFS_IR_LOGGER_TIMESTAMP=true +FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_IR_WALLET_PATH=/path/to/wallet.json FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX diff --git a/config/example/ir.yaml b/config/example/ir.yaml index 49f9fd324..ed53f014b 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -3,6 +3,9 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" timestamp: true + tags: + - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. 
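Putting the RuleFlagDesc grammar above together, a hedged example (the action set and resource are illustrative, not taken from this patch) of a rule that lets holders of the `others` role read all objects in one root container:

```
allow Object.Get Object.Head RequestCondition:"$Actor:role"=others /cid/*
```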
+ level: debug wallet: path: /path/to/wallet.json # Path to NEP-6 NEO wallet file diff --git a/config/example/node.env b/config/example/node.env index f470acf3e..9a2426358 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -1,6 +1,8 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_LOGGER_DESTINATION=journald FROSTFS_LOGGER_TIMESTAMP=true +FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_PPROF_ENABLED=true FROSTFS_PPROF_ADDRESS=localhost:6060 @@ -20,9 +22,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" FROSTFS_NODE_ATTRIBUTE_0=Price:11 FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" -FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state +FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db # Tree service section FROSTFS_TREE_ENABLED=true @@ -83,15 +85,20 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s FROSTFS_REPLICATOR_PUT_TIMEOUT=15s FROSTFS_REPLICATOR_POOL_SIZE=10 +# Container service section +FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 + # Object service section -FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 -FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=1000 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 + # Storage engine section -FROSTFS_STORAGE_SHARD_POOL_SIZE=15 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 ## 0 shard ### Flag to refill Metabase from BlobStor @@ -116,7 +123,8 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESS=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 @@ -151,6 +159,54 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500 #### Limit of concurrent workers collecting expired objects by the garbage collector FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15 +#### Limits config +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background 
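# A hedged reading of the per-tag scheduling knobs in this list (semantics
# inferred from the names, not normative):
#   TAG          - IO tag the entry applies to (internal, client, background, writecache, policer, treesync)
#   WEIGHT       - relative share of the shard's IO capacity granted to the tag
#   LIMIT_OPS    - hard cap on the tag's operations (0 means uncapped)
#   RESERVED_OPS - operations reserved for the tag even under contention
#   PROHIBITED   - when true, requests carrying this tag are rejected outright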
+FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 ## 1 shard ### Flag to refill Metabase from BlobStor @@ -222,3 +278,6 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" FROSTFS_MULTINET_BALANCER=roundrobin FROSTFS_MULTINET_RESTRICT=false FROSTFS_MULTINET_FALLBACK_DELAY=350ms + +FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" +FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" diff --git a/config/example/node.json b/config/example/node.json index dba3bad8b..6b7a9c2c6 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -2,7 +2,13 @@ "logger": { "level": "debug", "destination": "journald", - "timestamp": true + "timestamp": true, + "tags": [ + { + "names": "main, morph", + "level": "debug" + } + ] }, "pprof": { "enabled": true, @@ -31,13 +37,13 @@ ], "attribute_0": "Price:11", "attribute_1": "UN-LOCODE:RU MSK", - "relay": true, "persistent_sessions": { "path": "/sessions" }, "persistent_state": { "path": "/state" - } + }, + "locode_db_path": "/path/to/locode/db" }, "grpc": { "0": { @@ -124,21 +130,40 @@ "pool_size": 10, "put_timeout": "15s" }, + "container": { + "list_stream": { + "batch_size": "500" + } + }, "object": { "delete": { "tombstone_lifetime": 10 }, "put": { - "remote_pool_size": 100, - "local_pool_size": 200, "skip_session_token_issuer_verification": true }, "get": { "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] } }, + "rpc": { + "limits": [ + { + "methods": [ + 
"/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 1000 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + }, "storage": { - "shard_pool_size": 15, "shard_ro_error_threshold": 100, "shard": { "0": { @@ -163,12 +188,15 @@ "max_batch_size": 100, "max_batch_delay": "10ms" }, - "compress": true, - "compression_exclude_content_types": [ - "audio/*", "video/*" - ], - "compression_estimate_compressibility": true, - "compression_estimate_compressibility_threshold": 0.7, + "compression": { + "enabled": true, + "level": "fastest", + "exclude_content_types": [ + "audio/*", "video/*" + ], + "estimate_compressibility": true, + "estimate_compressibility_threshold": 0.7 + }, "small_object_size": 102400, "blobstor": [ { @@ -201,6 +229,87 @@ "remover_sleep_interval": "2m", "expired_collector_batch_size": 1500, "expired_collector_worker_count": 15 + }, + "limits": { + "read": { + "max_running_ops": 10000, + "max_waiting_ops": 1000, + "idle_timeout": "30s", + "tags": [ + { + "tag": "internal", + "weight": 20, + "limit_ops": 0, + "reserved_ops": 1000 + }, + { + "tag": "client", + "weight": 70, + "reserved_ops": 10000 + }, + { + "tag": "background", + "weight": 5, + "limit_ops": 10000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 5, + "limit_ops": 25000 + }, + { + "tag": "policer", + "weight": 5, + "limit_ops": 25000, + "prohibited": true + }, + { + "tag": "treesync", + "weight": 5, + "limit_ops": 25 + } + ] + }, + "write": { + "max_running_ops": 1000, + "max_waiting_ops": 100, + "idle_timeout": "45s", + "tags": [ + { + "tag": "internal", + "weight": 200, + "limit_ops": 0, + "reserved_ops": 100 + }, + { + "tag": "client", + "weight": 700, + "reserved_ops": 1000 + }, + { + "tag": "background", + "weight": 50, + "limit_ops": 1000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "policer", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "treesync", + "weight": 50, + "limit_ops": 100 + } + ] + } } }, "1": { @@ -221,7 +330,9 @@ "max_batch_size": 200, "max_batch_delay": "20ms" }, - "compress": false, + "compression": { + "enabled": false + }, "small_object_size": 102400, "blobstor": [ { @@ -300,5 +411,19 @@ "balancer": "roundrobin", "restrict": false, "fallback_delay": "350ms" + }, + "qos": { + "critical": { + "authorized_keys": [ + "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", + "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" + ] + }, + "internal": { + "authorized_keys": [ + "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", + "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" + ] + } } } diff --git a/config/example/node.yaml b/config/example/node.yaml index 8f9300b4a..2d4bc90fb 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -2,6 +2,9 @@ logger: level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" destination: journald # logger destination: one of "stdout" (default), "journald" timestamp: true + tags: + - names: "main, morph" + level: debug systemdnotify: enabled: true @@ -31,11 +34,11 @@ node: - grpcs://localhost:8083 attribute_0: "Price:11" attribute_1: UN-LOCODE:RU MSK - relay: true # start Storage node in relay mode without bootstrapping into the Network map persistent_sessions: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory 
sessions) persistent_state: path: /state # path to persistent state file of Storage node + "locode_db_path": "/path/to/locode/db" grpc: - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server @@ -79,7 +82,8 @@ contracts: # side chain NEOFS contract script hashes; optional, override values morph: dial_timeout: 30s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. + cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). + # Negative value disables caching. A zero value sets the default value. # Default value: block time. It is recommended to have this value less or equal to block time. # Cached entities: containers, container lists, eACL tables. container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache. @@ -94,6 +98,9 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 ape_chain_cache_size: 100000 + netmap: + candidates: + poll_interval: 20s apiclient: dial_timeout: 15s # timeout for FrostFS API client connection @@ -108,21 +115,31 @@ replicator: put_timeout: 15s # timeout for the Replicator PUT remote operation pool_size: 10 # maximum amount of concurrent replications +container: + list_stream: + batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once + object: delete: tombstone_lifetime: 10 # tombstone "local" lifetime in epochs put: - remote_pool_size: 100 # number of async workers for remote PUT operations - local_pool_size: 200 # number of async workers for local PUT operations skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true get: priority: # list of metrics of nodes for prioritization - $attribute:ClusterName - $attribute:UN-LOCODE +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 + storage: - # note: shard configuration can be omitted for relay node (see `node.relay`) - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard: @@ -136,7 +153,7 @@ storage: flush_worker_count: 30 # number of write-cache flusher threads metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) + perm: 0o644 # permissions for metabase files(directories: +x for current user and group) max_batch_size: 200 max_batch_delay: 20ms @@ -144,18 +161,19 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compress: false # turn on/off zstd(level 3) compression of stored objects + compression: + enabled: false # turn on/off zstd compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes blobstor: - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 1 # max depth of object tree storage in key-value DB width: 4 # max width of object tree
storage in key-value DB opened_cache_capacity: 50 # maximum number of opened database files opened_cache_ttl: 5m # ttl for opened database file opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 5 # max depth of object tree storage in FS gc: @@ -186,12 +204,14 @@ storage: max_batch_size: 100 max_batch_delay: 10ms - compress: true # turn on/off zstd(level 3) compression of stored objects - compression_exclude_content_types: - - audio/* - - video/* - compression_estimate_compressibility: true - compression_estimate_compressibility_threshold: 0.7 + compression: + enabled: true # turn on/off zstd compression of stored objects + level: fastest + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 blobstor: - type: blobovnicza @@ -214,6 +234,59 @@ storage: expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector + limits: + read: + max_running_ops: 10000 + max_waiting_ops: 1000 + idle_timeout: 30s + tags: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + prohibited: true + - tag: treesync + weight: 5 + limit_ops: 25 + write: + max_running_ops: 1000 + max_waiting_ops: 100 + idle_timeout: 45s + tags: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 + - tag: treesync + weight: 50 + limit_ops: 100 + 1: writecache: path: tmp/1/cache # write-cache root directory @@ -232,7 +305,7 @@ storage: pilorama: path: tmp/1/blob/pilorama.db no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. - perm: 0644 # permission to use for the database file and intermediate directories + perm: 0o644 # permission to use for the database file and intermediate directories tracing: enabled: true @@ -265,3 +338,13 @@ multinet: balancer: roundrobin restrict: false fallback_delay: 350ms + +qos: + critical: + authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag + - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 + - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a diff --git a/config/mainnet/README.md b/config/mainnet/README.md deleted file mode 100644 index 717a9b0ff..000000000 --- a/config/mainnet/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# N3 Mainnet Storage node configuration - -Here is a template for simple storage node configuration in N3 Mainnet. -Make sure to specify correct values instead of `<...>` placeholders. -Do not change `contracts` section. 
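The `qos` section above works together with `AdjustIncomingTag`: only the listed keys (or current netmap members) keep an `internal`/`critical` IO tag they send, and everything else is downgraded to `client` before the shard limits apply. A minimal sketch with placeholder keys (not real values):

```yaml
qos:
  internal:
    authorized_keys:
      - 02... # hex-encoded public key of a trusted internal component (placeholder)
  critical:
    authorized_keys:
      - 03... # hex-encoded public key of an operator (placeholder)
```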
Run the latest frostfs-node release with -the fixed config `frostfs-node -c config.yml` - -To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract. -The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221` -(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`) - -## Tips - -Use `grpcs://` scheme in the announced address if you enable TLS in grpc server. -```yaml -node: - addresses: - - grpcs://frostfs.my.org:8080 - -grpc: - num: 1 - 0: - endpoint: frostfs.my.org:8080 - tls: - enabled: true - certificate: /path/to/cert - key: /path/to/key -``` diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml deleted file mode 100644 index d86ea451f..000000000 --- a/config/mainnet/config.yml +++ /dev/null @@ -1,70 +0,0 @@ -node: - wallet: - path: - address: - password: - addresses: - - - attribute_0: UN-LOCODE: - attribute_1: Price:100000 - attribute_2: User-Agent:FrostFS\/0.9999 - -grpc: - num: 1 - 0: - endpoint: - tls: - enabled: false - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/path/metabase - perm: 0600 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m - -logger: - level: info - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -object: - put: - remote_pool_size: 100 - local_pool_size: 100 - -morph: - rpc_endpoint: - - wss://rpc1.morph.frostfs.info:40341/ws - - wss://rpc2.morph.frostfs.info:40341/ws - - wss://rpc3.morph.frostfs.info:40341/ws - - wss://rpc4.morph.frostfs.info:40341/ws - - wss://rpc5.morph.frostfs.info:40341/ws - - wss://rpc6.morph.frostfs.info:40341/ws - - wss://rpc7.morph.frostfs.info:40341/ws - dial_timeout: 20s - -contracts: - balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 - container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 - netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 diff --git a/config/testnet/README.md b/config/testnet/README.md deleted file mode 100644 index e2cda33ec..000000000 --- a/config/testnet/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# N3 Testnet Storage node configuration - -There is a prepared configuration for NeoFS Storage Node deployment in -N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared -docker image and run it with docker-compose. - -## Build image - -Prepared **frostfs-storage-testnet** image is available at Docker Hub. -However, if you need to rebuild it for some reason, run -`make image-storage-testnet` command. - -``` -$ make image-storage-testnet -... -Successfully built ab0557117b02 -Successfully tagged nspccdev/neofs-storage-testnet:0.25.1 -``` - -## Deploy node - -To run a storage node in N3 Testnet environment, you should deposit GAS assets, -update docker-compose file and start the node. - -### Deposit - -The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a -bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx. - -First, obtain GAS in N3 Testnet chain. You can do that with -[faucet](https://neowish.ngd.network) service. - -Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet. -You can provide scripthash in the `data` argument of transfer tx to make a -deposit to a specified account. Otherwise, deposit is made to the tx sender.
- -NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`, -so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`. - -See a deposit example with `neo-go`. - -``` -neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \ ---from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \ ---to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \ ---token GAS \ ---amount 1 -``` - -### Configure - -Next, configure `node_config.env` file. Change endpoints values. Both -should contain your **public** IP. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -``` - -Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory) -attribute. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED -``` - -You can validate UN/LOCODE attribute in -[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0) -with frostfs-cli. - -``` -$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED' -Country: Russia -Location: Saint Petersburg (ex Leningrad) -Continent: Europe -Subdivision: [SPE] Sankt-Peterburg -Coordinates: 59.53, 30.15 -``` - -It is recommended to pass the node's key as a file. To do so, convert your wallet -WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file. - -``` -// Print WIF in a 32-byte hex format -$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56 -PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059 -WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ -ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc -ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf - -// Save 32-byte hex into a file -$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key -``` - -Then, specify the path to this file in `docker-compose.yml` -```yaml - volumes: - - frostfs_storage:/storage - - ./my_wallet.key:/node.key -``` - - -NeoFS objects will be stored on your machine. By default, docker-compose -is configured to store objects in named docker volume `frostfs_storage`. You can -specify a directory on the filesystem to store objects there. - -```yaml - volumes: - - /home/username/frostfs/rc3/storage:/storage - - ./my_wallet.key:/node.key -``` - -### Start - -Run the node with `docker-compose up` command and stop it with `docker-compose down`. - -### Debug - -To print node logs, use `docker logs frostfs-testnet`. 
To print debug messages in -log, set up log level to debug with this env: - -```yaml - environment: - - NEOFS_LOGGER_LEVEL=debug -``` diff --git a/config/testnet/config.yml b/config/testnet/config.yml deleted file mode 100644 index 76b36cdf6..000000000 --- a/config/testnet/config.yml +++ /dev/null @@ -1,52 +0,0 @@ -logger: - level: info - -morph: - rpc_endpoint: - - wss://rpc01.morph.testnet.frostfs.info:51331/ws - - wss://rpc02.morph.testnet.frostfs.info:51331/ws - - wss://rpc03.morph.testnet.frostfs.info:51331/ws - - wss://rpc04.morph.testnet.frostfs.info:51331/ws - - wss://rpc05.morph.testnet.frostfs.info:51331/ws - - wss://rpc06.morph.testnet.frostfs.info:51331/ws - - wss://rpc07.morph.testnet.frostfs.info:51331/ws - dial_timeout: 20s - -contracts: - balance: e0420c216003747626670d1424569c17c79015bf - container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 - netmap: d4b331639799e2958d4bc5b711b469d79de94e01 - -node: - key: /node.key - attribute_0: Deployed:SelfHosted - attribute_1: User-Agent:FrostFS\/0.9999 - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/metabase - perm: 0777 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json index 6abf5ecdc..b68ce4fa3 100644 --- a/dev/.vscode-example/launch.json +++ b/dev/.vscode-example/launch.json @@ -42,7 +42,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080", @@ -98,7 +97,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082", @@ -154,7 +152,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084", @@ -210,7 +207,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086", diff --git a/docs/release-instruction.md b/docs/release-instruction.md index 18659c699..aa867e83c 100644 --- a/docs/release-instruction.md +++ b/docs/release-instruction.md @@ -95,19 +95,15 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} ## Post-release -### Prepare and push images to a Docker Hub (if not automated) +### Prepare and push images to a Docker registry (automated) -Create Docker images for all 
applications and push them into Docker Hub -(requires [organization](https://hub.docker.com/u/truecloudlab) privileges) +Create Docker images for all applications and push them into the container registry +(executed automatically in Forgejo Actions upon pushing a release tag): ```shell $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} $ make images -$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION} +$ make push-images ``` ### Make a proper release (if not automated) diff --git a/docs/shard-modes.md b/docs/shard-modes.md index 3b459335b..6cc4ab13c 100644 --- a/docs/shard-modes.md +++ b/docs/shard-modes.md @@ -51,10 +51,7 @@ However, all mode changing operations are idempotent. ## Automatic mode changes -Shard can automatically switch to a `degraded-read-only` mode in 3 cases: -1. If the metabase was not available or couldn't be opened/initialized during shard startup. -2. If shard error counter exceeds threshold. -3. If the metabase couldn't be reopened during SIGHUP handling. +A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold. # Detach shard diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 98d72cb69..da9fdfed0 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -12,21 +12,23 @@ There are some custom types used for brevity: # Structure -| Section | Description | -|------------------------|---------------------------------------------------------------------| -| `logger` | [Logging parameters](#logger-section) | -| `pprof` | [PProf configuration](#pprof-section) | -| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | -| `control` | [Control service configuration](#control-section) | -| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | -| `morph` | [N3 blockchain client configuration](#morph-section) | -| `apiclient` | [FrostFS API client configuration](#apiclient-section) | -| `policer` | [Policer service configuration](#policer-section) | -| `replicator` | [Replicator service configuration](#replicator-section) | -| `storage` | [Storage engine configuration](#storage-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | -| `multinet` | [Multinet configuration](#multinet-section) | +| Section | Description | +|--------------|---------------------------------------------------------| +| `node` | [Node parameters](#node-section) | +| `logger` | [Logging parameters](#logger-section) | +| `pprof` | [PProf configuration](#pprof-section) | +| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | +| `control` | [Control service configuration](#control-section) | +| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | +| `morph` | [N3 blockchain client configuration](#morph-section) | +| `apiclient` | [FrostFS API client configuration](#apiclient-section) | +| `policer` | [Policer service configuration](#policer-section) | +| `replicator` | [Replicator service configuration](#replicator-section) | +| `storage` | [Storage engine configuration](#storage-section) | +| `runtime` | [Runtime configuration](#runtime-section) | +| `audit` | [Audit&#10;
configuration](#audit-section) | +| `multinet` | [Multinet configuration](#multinet-section) | +| `qos` | [QoS configuration](#qos-section) | # `control` section ```yaml @@ -110,11 +112,21 @@ Contains logger parameters. ```yaml logger: level: info + tags: + - names: "main, morph" + level: debug ``` -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|---------------------------------------------------------------------------------------------------| -| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| Parameter | Type | Default value | Description | +|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------| +| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| `tags` | list of [tag descriptions](#tags-subsection) | | Array of tag descriptions. | + +## `tags` subsection +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `names` | `string` | | List of components separated by `,`.&#10;
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | +| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract @@ -147,15 +159,19 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 switch_interval: 2m + netmap: + candidates: + poll_interval: 20s ``` -| Parameter | Type | Default value | Description | -| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | -| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | -| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | -| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | -| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| Parameter | Type | Default value | Description | +|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | +| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | +| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | +| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | +| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval at which netmap candidates are merged with the netmap held in the local cache. | ## `rpc_endpoint` subsection | Parameter | Type | Default value | Description | @@ -169,7 +185,6 @@ Local storage engine configuration. | Parameter | Type | Default value | Description | |----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------| -| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. | | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. | | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. | | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. | @@ -180,20 +195,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting `default` subsection has the same format and specifies defaults for missing values. The following table describes configuration for each shard. -| Parameter | Type | Default value | Description | -| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `compress` | `bool` | `false` | Flag to enable compression. | -| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | -| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | -| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | -| `mode` | `string` | `read-write` | Shard Mode.&#10;
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | -| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | -| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | -| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | -| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | -| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | -| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | -| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| Parameter | Type | Default value | Description | +| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- | +| `compression` | [Compression config](#compression-subsection) | | Compression config. | +| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | +| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | +| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | +| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | +| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | +| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | +| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | +| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | + +### `compression` subsection + +Contains compression config. + +```yaml +compression: + enabled: true + level: smallest_size + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 +``` + +| Parameter | Type | Default value | Description | + ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | `bool` | `false` | Flag to enable compression. | +| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | +| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | +| `estimate_compressibility` | `bool` | `false` | If `true`, normalized compressibility estimation is used to decide whether to compress the data. | +| `estimate_compressibility_threshold` | `float` | `0.1` | Normalized compressibility estimate threshold: data is compressed if the estimate is greater than this value. |
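The three `level` values trade compression ratio against speed. For illustration only, here is one plausible mapping onto encoder levels of the zstd implementation already required in `go.mod` (`github.com/klauspost/compress`); this is a hedged sketch, not the node's actual wiring, and the fallback mirrors the new `UnknownCompressionLevelDefaultWillBeUsed` log message added in this diff:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// encoderLevel sketches a possible mapping from the documented `level`
// strings to zstd encoder levels; anything other than the three
// documented values falls back to the default level.
func encoderLevel(level string) zstd.EncoderLevel {
	switch level {
	case "fastest":
		return zstd.SpeedFastest
	case "smallest_size":
		return zstd.SpeedBestCompression
	default: // "optimal", "" or an unknown value
		return zstd.SpeedDefault
	}
}

func main() {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(encoderLevel("smallest_size")))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	// EncodeAll compresses a whole payload in one call, without streaming.
	compressed := enc.EncodeAll([]byte("example payload"), nil)
	fmt.Println(len(compressed))
}
```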
### `blobstor` subsection @@ -208,7 +244,7 @@ blobstor: width: 4 - type: fstree path: /path/to/blobstor/blobovnicza - perm: 0644 + perm: 0o644 size: 4194304 depth: 1 width: 4 @@ -268,7 +304,7 @@ gc: ```yaml metabase: path: /path/to/meta.db - perm: 0644 + perm: 0o644 max_batch_size: 200 max_batch_delay: 20ms ``` @@ -300,6 +336,65 @@ writecache: | `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. | | `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. &#10;| +### `limits` subsection + +```yaml +limits: + max_read_running_ops: 10000 + max_read_waiting_ops: 1000 + max_write_running_ops: 1000 + max_write_waiting_ops: 100 + read: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + write: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 +``` + +| Parameter | Type | Default value | Description | +| ----------------------- | -------- | -------------- | ----------------------------------------------------------------------------------------------------------------- | +| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. | +| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. | +| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. | +| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. | +| `read` | `[]tag` | empty | Array of shard read settings for tags. | +| `write` | `[]tag` | empty | Array of shard write settings for tags. | +| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. | +| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. | +| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | +| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | +| `tag.prohibited` | `bool` | false | If `true`, operations with the specified tag are prohibited. |
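As a cross-reference, the example above corresponds closely to the `LimiterConfig`/`OpConfig`/`IOTagConfig` types added in `internal/qos/config.go` further down in this diff. Below is a hedged sketch of the `read` half of the example expressed through those types; the `f` pointer helper is ours, and how the YAML is actually decoded into these structs is out of scope here:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

// f is a small pointer helper for optional float fields; it is not part of the diff.
func f(v float64) *float64 { return &v }

// exampleReadLimits mirrors the `read` block of the YAML example above
// using the internal/qos config types introduced later in this diff.
func exampleReadLimits() qos.LimiterConfig {
	return qos.LimiterConfig{
		Read: qos.OpConfig{
			MaxRunningOps: 10000, // max_read_running_ops
			MaxWaitingOps: 1000,  // max_read_waiting_ops
			IdleTimeout:   qos.DefaultIdleTimeout,
			Tags: []qos.IOTagConfig{
				{Tag: "internal", Weight: f(20), LimitOps: f(0), ReservedOps: f(1000)},
				{Tag: "client", Weight: f(70), ReservedOps: f(10000)},
				{Tag: "background", Weight: f(5), LimitOps: f(10000), ReservedOps: f(0)},
				{Tag: "writecache", Weight: f(5), LimitOps: f(25000)},
				{Tag: "policer", Weight: f(5), LimitOps: f(25000)},
			},
		},
		// The `write` block maps the same way and is omitted for brevity.
	}
}

func main() {
	fmt.Println(len(exampleReadLimits().Read.Tags), "read tags configured")
}
```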
# `node` section @@ -315,22 +410,22 @@ node: - "Price:11" - "UN-LOCODE:RU MSK" - "key:value" - relay: false persistent_sessions: path: /sessions persistent_state: path: /state + locode_db_path: "/path/to/locode/db" ``` -| Parameter | Type | Default value | Description | -|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------| -| `key` | `string` | | Path to the binary-encoded private key. | -| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | -| `addresses` | `[]string` | | Addresses advertised in the netmap. | -| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `relay` | `bool` | | Enable relay mode. | -| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | -| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. &#10;| +| Parameter | Type | Default value | Description | +|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------| +| `key` | `string` | | Path to the binary-encoded private key. | +| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | +| `addresses` | `[]string` | | Addresses advertised in the netmap. | +| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | +| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | +| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | +| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | ## `wallet` subsection N3 wallet configuration. @@ -395,18 +490,16 @@ replicator: pool_size: 10 ``` -| Parameter | Type | Default value | Description | -|---------------|------------|----------------------------------------|---------------------------------------------| -| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | -| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. | +| Parameter | Type | Default value | Description | +|---------------|------------|---------------|---------------------------------------------| +| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | +| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. | # `object` section Contains object-service related parameters. ```yaml object: - put: - remote_pool_size: 100 get: priority: - $attribute:ClusterName @@ -415,10 +508,29 @@ object: | Parameter | Type | Default value | Description | |-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------| | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | -| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | -| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | | `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. | + +# `rpc` section +Contains limits on the number of active RPCs for the specified method(s). + +```yaml +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 +``` + +| Parameter | Type | Default value | Description | +|------------------|------------|---------------|----------------------------------------------------------------|
+| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s). | +| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit. | +
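The semantics behind `max_ops` are those of a counted per-method slot pool: a request on a configured method must acquire a slot and release it on completion, and requests beyond the limit are rejected (the server interceptors added in `internal/qos/grpc.go` below translate that rejection into `ResourceExhausted`). A toy sketch of this behavior using only the standard library — the real limiter lives in the `frostfs-qos` `limiting` package, and this is not its implementation:

```go
package main

import (
	"fmt"
	"sync"
)

type methodLimiter struct {
	mu    sync.Mutex
	slots map[string]int // method -> remaining slots
}

func newMethodLimiter(maxOps map[string]int) *methodLimiter {
	s := make(map[string]int, len(maxOps))
	for m, n := range maxOps {
		s[m] = n
	}
	return &methodLimiter{slots: s}
}

// Acquire mirrors the limiting.Limiter shape used by the interceptors in
// this diff: it returns a release func and false when the limit is exhausted.
func (l *methodLimiter) Acquire(method string) (func(), bool) {
	l.mu.Lock()
	defer l.mu.Unlock()
	n, limited := l.slots[method]
	if !limited {
		return func() {}, true // unconfigured methods are not limited
	}
	if n == 0 {
		return nil, false // over max_ops: caller should fail with ResourceExhausted
	}
	l.slots[method] = n - 1
	return func() {
		l.mu.Lock()
		l.slots[method]++
		l.mu.Unlock()
	}, true
}

func main() {
	lim := newMethodLimiter(map[string]int{"/neo.fs.v2.object.ObjectService/Put": 2})
	release, ok := lim.Acquire("/neo.fs.v2.object.ObjectService/Put")
	fmt.Println(ok) // true: one of two slots taken
	if ok {
		release()
	}
}
```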
# `runtime` section Contains runtime parameters. &#10;@@ -471,3 +583,20 @@ multinet: | `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | | `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | | `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | + +# `qos` section +```yaml +qos: + critical: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 +``` +| Parameter | Type | Default value | Description | +| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- | +| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. | +| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. | diff --git a/go.mod b/go.mod index c538a3178..6f1950936 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,18 @@ module git.frostfs.info/TrueCloudLab/frostfs-node -go 1.22 +go 1.23.0 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 + git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 @@ -27,7 +28,7 @@ require ( github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.12.1 + github.com/multiformats/go-multiaddr v0.15.0 github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 @@ -40,15 +41,14 @@ require ( github.com/ssgreg/journald v1.0.0 github.com/stretchr/testify v1.9.0 go.etcd.io/bbolt v1.3.10 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - golang.org/x/term v0.21.0 - google.golang.org/grpc v1.66.2 - google.golang.org/protobuf v1.34.2 + golang.org/x/sync v0.12.0 + golang.org/x/sys &#10;
v0.31.0 + golang.org/x/term v0.30.0 + google.golang.org/grpc v1.69.2 + google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 ) @@ -85,9 +85,9 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -119,17 +119,18 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 064f3274e..5b075f60a 100644 --- a/go.sum +++ b/go.sum @@ -1,23 +1,25 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 h1:o3iqVmbvFsfe8kpB2Hvuix6Q/tAhbiPLP91xK4lmoBQ= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod 
h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 h1:sepm9FeuoInmygH1K/+3L+Yp5bJhGiVi/oGCH6Emp2c= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= @@ -106,6 +108,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -141,14 +145,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -188,8 +192,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= -github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= +github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= +github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -290,20 +294,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -318,15 +324,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -339,16 +345,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -375,16 +381,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term 
v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -392,26 +398,26 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod 
h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -420,8 +426,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -439,7 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/internal/assert/cond.go b/internal/assert/cond.go new file mode 100644 index 000000000..113d2eba9 --- /dev/null +++ b/internal/assert/cond.go @@ -0,0 +1,29 @@ +package assert + +import ( + "fmt" + "strings" +) + +func True(cond bool, details ...string) { + if !cond { + panic(strings.Join(details, " ")) + } +} + +func False(cond bool, details ...string) { + if cond { + panic(strings.Join(details, " ")) + } +} + +func NoError(err error, details ...string) { + if err != nil { + content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " ")) + panic(content) + } 
+} + +func Fail(details ...string) { + panic(strings.Join(details, " ")) +} diff --git a/internal/logs/logs.go b/internal/logs/logs.go index d0bac4d11..626372f43 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -125,7 +125,6 @@ const ( SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" SearchLocalOperationFailed = "local operation failed" UtilObjectServiceError = "object service error" - UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" @@ -146,7 +145,6 @@ const ( ClientCantGetBlockchainHeight = "can't get blockchain height" ClientCantGetBlockchainHeight243 = "can't get blockchain height" EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" - EventCouldNotStartListenToEvents = "could not start listen to events" EventStopEventListenerByError = "stop event listener by error" EventStopEventListenerByContext = "stop event listener by context" EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" @@ -164,17 +162,9 @@ const ( EventNotaryParserNotSet = "notary parser not set" EventCouldNotParseNotaryEvent = "could not parse notary event" EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" - EventIgnoreNilEventParser = "ignore nil event parser" - EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" EventRegisteredNewEventParser = "registered new event parser" - EventIgnoreNilEventHandler = "ignore nil event handler" - EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" EventRegisteredNewEventHandler = "registered new event handler" - EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" - EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" - EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" - EventIgnoreNilBlockHandler = "ignore nil block handler" StorageOperation = "local object storage operation" BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" BlobovniczaOpeningBoltDB = "opening BoltDB" @@ -208,6 +198,7 @@ const ( EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" EngineInterruptGettingLockers = "can't get object's lockers" EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" + EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones" EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" @@ -262,6 +253,7 @@ const ( ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" + ShardCouldNotFindObject = 
"could not find object" WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" @@ -392,7 +384,6 @@ const ( FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown" FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing" FrostFSNodeConfigurationReading = "configuration reading" - FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" FrostFSNodeTracingConfigationUpdated = "tracing configation updated" FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" FrostFSNodePoolConfigurationUpdate = "adjust pool configuration" @@ -520,4 +511,11 @@ const ( BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" WritecacheCantGetObject = "can't get an object from fstree" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" + FailedToParseIncomingIOTag = "failed to parse incoming IO tag" + NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" + FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" + FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" + WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" + FailedToUpdateNetmapCandidates = "update netmap candidates failed" + UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used" ) diff --git a/internal/metrics/application.go b/internal/metrics/application.go index 8bc408ab6..53acf9b7f 100644 --- a/internal/metrics/application.go +++ b/internal/metrics/application.go @@ -12,8 +12,9 @@ type ApplicationInfo struct { func NewApplicationInfo(version string) *ApplicationInfo { appInfo := &ApplicationInfo{ versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Name: "app_info", - Help: "General information about the application.", + Namespace: namespace, + Name: "app_info", + Help: "General information about the application.", }, []string{"version"}), } appInfo.versionValue.With(prometheus.Labels{"version": version}) diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go index cb165de69..9123541ff 100644 --- a/internal/metrics/consts.go +++ b/internal/metrics/consts.go @@ -23,6 +23,7 @@ const ( policerSubsystem = "policer" commonCacheSubsystem = "common_cache" multinetSubsystem = "multinet" + qosSubsystem = "qos" successLabel = "success" shardIDLabel = "shard_id" @@ -43,6 +44,7 @@ const ( hitLabel = "hit" cacheLabel = "cache" sourceIPLabel = "source_ip" + ioTagLabel = "io_tag" readWriteMode = "READ_WRITE" readOnlyMode = "READ_ONLY" diff --git a/internal/metrics/node.go b/internal/metrics/node.go index 4ea3c7c24..8ade19eb2 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -26,6 +26,7 @@ type NodeMetrics struct { morphCache *morphCacheMetrics log logger.LogMetrics multinet *multinetMetrics + qos *QoSMetrics // nolint: unused appInfo *ApplicationInfo } @@ -55,6 +56,7 @@ func NewNodeMetrics() *NodeMetrics { log: logger.NewLogMetrics(namespace), appInfo: NewApplicationInfo(misc.Version), multinet: newMultinetMetrics(namespace), + qos: newQoSMetrics(), } } @@ -126,3 +128,7 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics { func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { return 
m.multinet } + +func (m *NodeMetrics) QoSMetrics() *QoSMetrics { + return m.qos +} diff --git a/internal/metrics/object.go b/internal/metrics/object.go index 0ba994ed3..e4f6dfde1 100644 --- a/internal/metrics/object.go +++ b/internal/metrics/object.go @@ -9,13 +9,14 @@ import ( ) type ObjectServiceMetrics interface { - AddRequestDuration(method string, d time.Duration, success bool) + AddRequestDuration(method string, d time.Duration, success bool, ioTag string) AddPayloadSize(method string, size int) } type objectServiceMetrics struct { - methodDuration *prometheus.HistogramVec - payloadCounter *prometheus.CounterVec + methodDuration *prometheus.HistogramVec + payloadCounter *prometheus.CounterVec + ioTagOpsCounter *prometheus.CounterVec } func newObjectServiceMetrics() *objectServiceMetrics { @@ -32,14 +33,24 @@ func newObjectServiceMetrics() *objectServiceMetrics { Name: "request_payload_bytes", Help: "Object Service request payload", }, []string{methodLabel}), + ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: objectSubsystem, + Name: "requests_total", + Help: "Count of requests for each IO tag", + }, []string{methodLabel, ioTagLabel}), } } -func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) { +func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) { m.methodDuration.With(prometheus.Labels{ methodLabel: method, successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) + m.ioTagOpsCounter.With(prometheus.Labels{ + ioTagLabel: ioTag, + methodLabel: method, + }).Inc() } func (m *objectServiceMetrics) AddPayloadSize(method string, size int) { diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go new file mode 100644 index 000000000..be6878142 --- /dev/null +++ b/internal/metrics/qos.go @@ -0,0 +1,52 @@ +package metrics + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type QoSMetrics struct { + opsCounter *prometheus.GaugeVec +} + +func newQoSMetrics() *QoSMetrics { + return &QoSMetrics{ + opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: qosSubsystem, + Name: "operations_total", + Help: "Count of pending, in-progress, completed and resource-exhausted operations for each shard", + }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), + } +} + +func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) { + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "pending", + }).Set(float64(pending)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "in_progress", + }).Set(float64(inProgress)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "completed", + }).Set(float64(completed)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "resource_exhausted", + }).Set(float64(resourceExhausted)) +} + +func (m *QoSMetrics) Close(shardID string) { + m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) +}
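A hedged usage sketch for the new QoS metrics: the shard ID, operation name, and counter values below are invented for illustration, while `NewNodeMetrics` and the `QoSMetrics` accessor come from this diff. Setting all four gauge types in one call keeps the per-tag series consistent, and `Close` removes every series of a shard when it is detached:

```go
package main

import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"

func main() {
	qos := metrics.NewNodeMetrics().QoSMetrics()

	// Publish per-tag queue state for one shard:
	// pending=3, in_progress=2, completed=100, resource_exhausted=1.
	qos.SetOperationTagCounters("shard-1", "get", "client", 3, 2, 100, 1)

	// On shard detach, drop every series that belongs to the shard.
	qos.Close("shard-1")
}
```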
6702aa83c..e192c4398 100644 --- a/internal/metrics/treeservice.go +++ b/internal/metrics/treeservice.go @@ -12,12 +12,14 @@ type TreeMetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type treeServiceMetrics struct { replicateTaskDuration *prometheus.HistogramVec replicateWaitDuration *prometheus.HistogramVec syncOpDuration *prometheus.HistogramVec + ioTagOpsCounter *prometheus.CounterVec } var _ TreeMetricsRegister = (*treeServiceMetrics)(nil) @@ -42,6 +44,12 @@ func newTreeServiceMetrics() *treeServiceMetrics { Name: "sync_duration_seconds", Help: "Duration of synchronization operations", }, []string{successLabel}), + ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: treeServiceSubsystem, + Name: "requests_total", + Help: "Count of requests for each IO tag", + }, []string{methodLabel, ioTagLabel}), } } @@ -62,3 +70,10 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) { successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) } + +func (m *treeServiceMetrics) AddOperation(op string, ioTag string) { + m.ioTagOpsCounter.With(prometheus.Labels{ + ioTagLabel: ioTag, + methodLabel: op, + }).Inc() +} diff --git a/internal/qos/config.go b/internal/qos/config.go new file mode 100644 index 000000000..d90b403b5 --- /dev/null +++ b/internal/qos/config.go @@ -0,0 +1,31 @@ +package qos + +import ( + "math" + "time" +) + +const ( + NoLimit int64 = math.MaxInt64 + DefaultIdleTimeout = 5 * time.Minute +) + +type LimiterConfig struct { + Read OpConfig + Write OpConfig +} + +type OpConfig struct { + MaxWaitingOps int64 + MaxRunningOps int64 + IdleTimeout time.Duration + Tags []IOTagConfig +} + +type IOTagConfig struct { + Tag string + Weight *float64 + LimitOps *float64 + ReservedOps *float64 + Prohibited bool +} diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go new file mode 100644 index 000000000..58cd9e52c --- /dev/null +++ b/internal/qos/grpc.go @@ -0,0 +1,86 @@ +package qos + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "google.golang.org/grpc" +) + +func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String()) + return handler(ctx, req) + } +} + +func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return invoker(ctx, method, req, reply, cc, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag.IsLocal() { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return invoker(ctx, method, req, reply, cc, opts...) 
+ } +} + +func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return streamer(ctx, desc, cc, method, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag.IsLocal() { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return streamer(ctx, desc, cc, method, opts...) + } +} + +func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() { + return handler(ctx, req) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return nil, new(apistatus.ResourceExhausted) + } + defer release() + + return handler(ctx, req) + } +} + +//nolint:contextcheck (grpc.ServerStream manages the context itself) +func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() { + return handler(srv, ss) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return new(apistatus.ResourceExhausted) + } + defer release() + + return handler(srv, ss) + } +} diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go new file mode 100644 index 000000000..7d0826754 --- /dev/null +++ b/internal/qos/grpc_test.go @@ -0,0 +1,219 @@ +package qos_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +const ( + okKey = "ok" +) + +var ( + errTest = errors.New("mock") + errWrongTag = errors.New("wrong tag") + errNoTag = errors.New("failed to get tag from context") + errResExhausted *apistatus.ResourceExhausted + tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} +) + +type mockGRPCServerStream struct { + grpc.ServerStream + + ctx context.Context +} + +func (m *mockGRPCServerStream) Context() context.Context { + return m.ctx +} + +type limiter struct { + acquired bool + released bool +} + +func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { + l.acquired = true + if key != okKey { + return nil, false + } + return func() { l.released = true }, true +} + +func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(ctx context.Context, req any) (any, error) { + return nil, errTest + } + _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) + return err +} + +func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := 
qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(srv any, stream grpc.ServerStream) error { + return errTest + } + err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ + FullMethod: methodName, + }, handler) + return err +} + +func Test_MaxActiveRPCLimiter(t *testing.T) { + // UnaryServerInterceptor + t.Run("unary fail", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := unaryMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) + // StreamServerInterceptor + t.Run("stream fail", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := streamMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) +} + +func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { + interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() + called := false + handler := func(ctx context.Context, req any) (any, error) { + called = true + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { + return nil, nil + } + return nil, errWrongTag + } + _, err := interceptor(context.Background(), nil, nil, handler) + require.NoError(t, err) + require.True(t, called) +} + +func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() + + // check context with no value + called := false + invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil + } + require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) + require.True(t, called) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return errNoTag + } + if raw != targetTag { + return errWrongTag + } + return nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + require.NoError(t, 
interceptor(ctx, "", nil, nil, nil, invoker, nil)) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) +} + +func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() + + // check context with no value + called := false + streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil, nil + } + _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) + require.True(t, called) + require.NoError(t, err) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return nil, errNoTag + } + if raw != targetTag { + return nil, errWrongTag + } + return nil, nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + _, err := interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + _, err = interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) +} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go new file mode 100644 index 000000000..2d7de32fc --- /dev/null +++ b/internal/qos/limiter.go @@ -0,0 +1,246 @@ +package qos + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" +) + +const ( + defaultIdleTimeout time.Duration = 0 + defaultShare float64 = 1.0 + minusOne = ^uint64(0) + + defaultMetricsCollectTimeout = 5 * time.Second +) + +type ReleaseFunc scheduling.ReleaseFunc + +type Limiter interface { + ReadRequest(context.Context) (ReleaseFunc, error) + WriteRequest(context.Context) (ReleaseFunc, error) + SetParentID(string) + SetMetrics(Metrics) + Close() +} + +type scheduler interface { + RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error) + Close() +} + +func NewLimiter(c LimiterConfig) (Limiter, error) { + if err := c.Validate(); err != nil { + return nil, err + } + readScheduler, err := createScheduler(c.Read) + if err != nil { + return nil, fmt.Errorf("create read scheduler: %w", err) + } + writeScheduler, err := createScheduler(c.Write) + if err != nil { + return nil, fmt.Errorf("create write scheduler: %w", err) + } + l := &mClockLimiter{ + readScheduler: readScheduler, + writeScheduler: writeScheduler, + closeCh: make(chan struct{}), + wg: &sync.WaitGroup{}, + readStats: createStats(), + writeStats: createStats(), + } + l.shardID.Store(&shardID{}) + l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}}) + l.startMetricsCollect() + return l, nil +} + +func createScheduler(config OpConfig) (scheduler, error) { + if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { + return 
newSemaphoreScheduler(config.MaxRunningOps), nil + } + return scheduling.NewMClock( + uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps), + converToSchedulingTags(config.Tags), config.IdleTimeout) +} + +func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo { + result := make(map[string]scheduling.TagInfo) + for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { + result[tag.String()] = scheduling.TagInfo{ + Share: defaultShare, + } + } + for _, l := range limits { + v := result[l.Tag] + if l.Weight != nil && *l.Weight != 0 { + v.Share = *l.Weight + } + if l.LimitOps != nil && *l.LimitOps != 0 { + v.LimitIOPS = l.LimitOps + } + if l.ReservedOps != nil && *l.ReservedOps != 0 { + v.ReservedIOPS = l.ReservedOps + } + v.Prohibited = l.Prohibited + result[l.Tag] = v + } + return result +} + +var ( + _ Limiter = (*noopLimiter)(nil) + releaseStub ReleaseFunc = func() {} + noopLimiterInstance = &noopLimiter{} +) + +func NewNoopLimiter() Limiter { + return noopLimiterInstance +} + +type noopLimiter struct{} + +func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) { + return releaseStub, nil +} + +func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { + return releaseStub, nil +} + +func (n *noopLimiter) SetParentID(string) {} + +func (n *noopLimiter) Close() {} + +func (n *noopLimiter) SetMetrics(Metrics) {} + +var _ Limiter = (*mClockLimiter)(nil) + +type shardID struct { + id string +} + +type mClockLimiter struct { + readScheduler scheduler + writeScheduler scheduler + + readStats map[string]*stat + writeStats map[string]*stat + + shardID atomic.Pointer[shardID] + metrics atomic.Pointer[metricsHolder] + closeCh chan struct{} + wg *sync.WaitGroup +} + +func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { + return requestArrival(ctx, n.readScheduler, n.readStats) +} + +func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { + return requestArrival(ctx, n.writeScheduler, n.writeStats) +} + +func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = IOTagClient.String() + } + stat := getStat(tag, stats) + stat.pending.Add(1) + if tag == IOTagCritical.String() { + stat.inProgress.Add(1) + return func() { + stat.completed.Add(1) + }, nil + } + rel, err := s.RequestArrival(ctx, tag) + stat.inProgress.Add(1) + if err != nil { + if isResourceExhaustedErr(err) { + stat.resourceExhausted.Add(1) + return nil, &apistatus.ResourceExhausted{} + } + stat.completed.Add(1) + return nil, err + } + return func() { + rel() + stat.completed.Add(1) + }, nil +} + +func (n *mClockLimiter) Close() { + n.readScheduler.Close() + n.writeScheduler.Close() + close(n.closeCh) + n.wg.Wait() + n.metrics.Load().metrics.Close(n.shardID.Load().id) +} + +func (n *mClockLimiter) SetParentID(parentID string) { + n.shardID.Store(&shardID{id: parentID}) +} + +func (n *mClockLimiter) SetMetrics(m Metrics) { + n.metrics.Store(&metricsHolder{metrics: m}) +} + +func (n *mClockLimiter) startMetricsCollect() { + n.wg.Add(1) + go func() { + defer n.wg.Done() + + ticker := time.NewTicker(defaultMetricsCollectTimeout) + defer ticker.Stop() + for { + select { + case <-n.closeCh: + return + case <-ticker.C: + shardID := n.shardID.Load().id + if shardID == "" { + continue + } + metrics := 
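Usage sketch for the `Limiter` above, as a storage operation would see it; `readPayload` is a made-up caller and the imports (`context`, the qos package) are assumed. A successful acquire must always be paired with the release callback, and a refused acquire surfaces as `ResourceExhausted`:

func readPayload(ctx context.Context, lim qos.Limiter) error {
	release, err := lim.ReadRequest(ctx)
	if err != nil {
		// Either ctx is done, or the scheduler refused the request's tag;
		// the latter comes back as *apistatus.ResourceExhausted.
		return err
	}
	defer release()

	// ... perform the actual disk read under the acquired slot ...
	return nil
}

Note that, per requestArrival above, `critical`-tagged requests skip the scheduler entirely but are still counted in the per-tag stats.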
n.metrics.Load().metrics + exportMetrics(metrics, n.readStats, shardID, "read") + exportMetrics(metrics, n.writeStats, shardID, "write") + } + } + }() +} + +func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) { + var pending uint64 + var inProgress uint64 + var completed uint64 + var resExh uint64 + for tag, s := range stats { + pending = s.pending.Load() + inProgress = s.inProgress.Load() + completed = s.completed.Load() + resExh = s.resourceExhausted.Load() + if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 { + continue + } + metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) + } +} + +func isResourceExhaustedErr(err error) bool { + return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || + errors.Is(err, errSemaphoreLimitExceeded) || + errors.Is(err, scheduling.ErrTagRequestsProhibited) +} diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go new file mode 100644 index 000000000..c00da51b7 --- /dev/null +++ b/internal/qos/metrics.go @@ -0,0 +1,31 @@ +package qos + +import "sync/atomic" + +type Metrics interface { + SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) + Close(shardID string) +} + +var _ Metrics = (*noopMetrics)(nil) + +type noopMetrics struct{} + +func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) { +} + +func (n *noopMetrics) Close(string) {} + +// stat represents the limiter's cumulative statistics counters. + +// +// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`. +type stat struct { + completed atomic.Uint64 + pending atomic.Uint64 + resourceExhausted atomic.Uint64 + inProgress atomic.Uint64 +} + +type metricsHolder struct { + metrics Metrics +} diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go new file mode 100644 index 000000000..74e6928f3 --- /dev/null +++ b/internal/qos/semaphore.go @@ -0,0 +1,39 @@ +package qos + +import ( + "context" + "errors" + + qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" +) + +var ( + _ scheduler = (*semaphore)(nil) + errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded") +) + +type semaphore struct { + s *qosSemaphore.Semaphore +} + +func newSemaphoreScheduler(size int64) *semaphore { + return &semaphore{ + s: qosSemaphore.NewSemaphore(size), + } +} + +func (s *semaphore) Close() {} + +func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if s.s.Acquire() { + return s.s.Release, nil + } + return nil, errSemaphoreLimitExceeded +} diff --git a/internal/qos/stats.go b/internal/qos/stats.go new file mode 100644 index 000000000..3ecfad9f9 --- /dev/null +++ b/internal/qos/stats.go @@ -0,0 +1,29 @@ +package qos + +const unknownStatsTag = "unknown" + +var statTags = map[string]struct{}{ + IOTagBackground.String(): {}, + IOTagClient.String(): {}, + IOTagCritical.String(): {}, + IOTagInternal.String(): {}, + IOTagPolicer.String(): {}, + IOTagTreeSync.String(): {}, + IOTagWritecache.String(): {}, + unknownStatsTag: {}, +} + +func createStats() map[string]*stat { + result := make(map[string]*stat) + for tag := range statTags { + result[tag] = &stat{} + } + return result +} + +func getStat(tag string, stats map[string]*stat)
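When no per-tag settings are configured and waiting is unlimited, `createScheduler` above falls back to this plain counting semaphore instead of an mClock queue. Per the adapter code, `Acquire` is non-blocking and simply reports whether a slot was free. A usage sketch built from the same calls the diff imports:

s := qosSemaphore.NewSemaphore(8) // cap concurrency at 8 (size is illustrative)
if s.Acquire() {                  // non-blocking
	defer s.Release()
	// bounded work
} else {
	// over the limit: RequestArrival maps this to errSemaphoreLimitExceeded,
	// which isResourceExhaustedErr() translates into ResourceExhausted
}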
*stat { + if v, ok := stats[tag]; ok { + return v + } + return stats[unknownStatsTag] +} diff --git a/internal/qos/tags.go b/internal/qos/tags.go new file mode 100644 index 000000000..e3f7cafd6 --- /dev/null +++ b/internal/qos/tags.go @@ -0,0 +1,59 @@ +package qos + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" +) + +type IOTag string + +const ( + IOTagBackground IOTag = "background" + IOTagClient IOTag = "client" + IOTagCritical IOTag = "critical" + IOTagInternal IOTag = "internal" + IOTagPolicer IOTag = "policer" + IOTagTreeSync IOTag = "treesync" + IOTagWritecache IOTag = "writecache" + + ioTagUnknown IOTag = "" +) + +func FromRawString(s string) (IOTag, error) { + switch s { + case string(IOTagBackground): + return IOTagBackground, nil + case string(IOTagClient): + return IOTagClient, nil + case string(IOTagCritical): + return IOTagCritical, nil + case string(IOTagInternal): + return IOTagInternal, nil + case string(IOTagPolicer): + return IOTagPolicer, nil + case string(IOTagTreeSync): + return IOTagTreeSync, nil + case string(IOTagWritecache): + return IOTagWritecache, nil + default: + return ioTagUnknown, fmt.Errorf("unknown tag %s", s) + } +} + +func (t IOTag) String() string { + return string(t) +} + +func IOTagFromContext(ctx context.Context) string { + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = "undefined" + } + return tag +} + +func (t IOTag) IsLocal() bool { + return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync +} diff --git a/internal/qos/validate.go b/internal/qos/validate.go new file mode 100644 index 000000000..70f1f24e8 --- /dev/null +++ b/internal/qos/validate.go @@ -0,0 +1,91 @@ +package qos + +import ( + "errors" + "fmt" + "math" +) + +var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") + +type tagConfig struct { + Shares, Limit, Reserved *float64 +} + +func (c *LimiterConfig) Validate() error { + if err := validateOpConfig(c.Read); err != nil { + return fmt.Errorf("limits 'read' section validation error: %w", err) + } + if err := validateOpConfig(c.Write); err != nil { + return fmt.Errorf("limits 'write' section validation error: %w", err) + } + return nil +} + +func validateOpConfig(c OpConfig) error { + if c.MaxRunningOps <= 0 { + return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) + } + if c.MaxWaitingOps <= 0 { + return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps) + } + if c.IdleTimeout <= 0 { + return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String()) + } + if err := validateTags(c.Tags); err != nil { + return fmt.Errorf("'tags' config section validation error: %w", err) + } + return nil +} + +func validateTags(configTags []IOTagConfig) error { + tags := map[IOTag]tagConfig{ + IOTagBackground: {}, + IOTagClient: {}, + IOTagInternal: {}, + IOTagPolicer: {}, + IOTagTreeSync: {}, + IOTagWritecache: {}, + } + for _, t := range configTags { + tag, err := FromRawString(t.Tag) + if err != nil { + return fmt.Errorf("invalid tag %s: %w", t.Tag, err) + } + if _, ok := tags[tag]; !ok { + return fmt.Errorf("tag %s is not configurable", t.Tag) + } + tags[tag] = tagConfig{ + Shares: t.Weight, + Limit: t.LimitOps, + Reserved: t.ReservedOps, + } + } + idx := 0 + var shares float64 + for t, v := range tags { + if idx == 0 { + idx++ + shares = float64Value(v.Shares) + } else if 
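For context on the weight check running in this loop, a hedged sketch of configurations that pass and fail it (my reading of the code above, with invented weights). Note that `critical` is deliberately absent from the configurable set, so it can never be weighted, limited, or prohibited:

w := func(f float64) *float64 { return &f }

// Fails Validate(): weights are all-or-none across the six configurable
// tags, and here five of them default to an unset (zero) weight.
bad := []qos.IOTagConfig{{Tag: "client", Weight: w(70)}}

// Passes: every configurable tag carries a weight (leaving all of them
// unweighted would pass as well).
good := []qos.IOTagConfig{
	{Tag: "client", Weight: w(70)}, {Tag: "internal", Weight: w(20)},
	{Tag: "background", Weight: w(4)}, {Tag: "policer", Weight: w(2)},
	{Tag: "treesync", Weight: w(2)}, {Tag: "writecache", Weight: w(2)},
}
_, _ = bad, good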
(shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) { + return errWeightsMustBeSpecified + } + if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) { + return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String()) + } + if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) { + return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String()) + } + if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) { + return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String()) + } + } + return nil +} + +func float64Value(f *float64) float64 { + if f == nil { + return 0.0 + } + return *f +} diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go index 953b91a79..8cbb1cce9 100644 --- a/pkg/ape/contract_storage/proxy.go +++ b/pkg/ape/contract_storage/proxy.go @@ -31,9 +31,7 @@ type RPCActorProvider interface { type ProxyVerificationContractStorage struct { rpcActorProvider RPCActorProvider - acc *wallet.Account - - proxyScriptHash util.Uint160 + cosigners []actor.SignerAccount policyScriptHash util.Uint160 } @@ -41,12 +39,27 @@ type ProxyVerificationContractStorage struct { var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil) func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage { + acc := wallet.NewAccountFromPrivateKey(key) return &ProxyVerificationContractStorage{ rpcActorProvider: rpcActorProvider, - acc: wallet.NewAccountFromPrivateKey(key), - - proxyScriptHash: proxyScriptHash, + cosigners: []actor.SignerAccount{ + { + Signer: transaction.Signer{ + Account: proxyScriptHash, + Scopes: transaction.CustomContracts, + AllowedContracts: []util.Uint160{policyScriptHash}, + }, + Account: notary.FakeContractAccount(proxyScriptHash), + }, + { + Signer: transaction.Signer{ + Account: acc.Contract.ScriptHash(), + Scopes: transaction.CalledByEntry, + }, + Account: acc, + }, + }, policyScriptHash: policyScriptHash, } @@ -64,7 +77,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke { func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) { rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash)) + act, err := actor.New(rpcActor, contractStorage.cosigners) if err != nil { return nil, err } @@ -98,31 +111,16 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na // ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as consigners. func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { - // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but - // ProxyVerificationContractStorage does not manage reconnections. 
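The comment removed here explains why the old code rebuilt a full signing actor on every call. The replacement narrows that concern to reads: listing rule chains is a view operation, so a signer-free invoker over the freshly fetched RPC actor suffices, while writes keep the notary-backed actor whose cosigners are now assembled once in the constructor. A condensed view of both paths, using the same calls as the diff:

// Read path: one-shot, signer-free invoker per call; reconnects remain
// the RPCActorProvider's problem.
rpcActor := contractStorage.rpcActorProvider.GetRPCActor()
inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor}
reader := policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash)

// Write path: still a signing actor, with the cosigners (proxy contract
// first, storage account second) precomputed at construction time.
act, err := actor.New(rpcActor, contractStorage.cosigners)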
- contractStorageActor, err := contractStorage.newContractStorageActor() - if err != nil { - return nil, err - } - return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) + rpcActor := contractStorage.rpcActorProvider.GetRPCActor() + inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor} + return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) } -func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount { - return []actor.SignerAccount{ - { - Signer: transaction.Signer{ - Account: proxyScriptHash, - Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{policyScriptHash}, - }, - Account: notary.FakeContractAccount(proxyScriptHash), - }, - { - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CalledByEntry, - }, - Account: acc, - }, - } +type invokerAdapter struct { + *invoker.Invoker + rpcInvoker invoker.RPCInvoke +} + +func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { + return n.rpcInvoker } diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go index c0413678d..d32bd4a07 100644 --- a/pkg/ape/request/frostfsid.go +++ b/pkg/ape/request/frostfsid.go @@ -1,6 +1,7 @@ package request import ( + "context" "fmt" "strconv" "strings" @@ -12,9 +13,9 @@ import ( ) // FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID. -func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { +func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { reqProps := make(map[string]string) - subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) + subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) @@ -36,8 +37,8 @@ func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvide } // Groups return the actor's group ids from frostfsid contract. -func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { - subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) +func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { + subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go index d4bc0cf68..91ee5c6c3 100644 --- a/pkg/core/client/util.go +++ b/pkg/core/client/util.go @@ -3,6 +3,7 @@ package client import ( "bytes" "fmt" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -19,7 +20,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro // Args must not be nil. 
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { PublicKey() []byte - IterateAddresses(func(string) bool) + Addresses() iter.Seq[string] NumberOfAddresses() int ExternalAddresses() []string }, diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go index 62cc21553..1c52d93e7 100644 --- a/pkg/core/container/info.go +++ b/pkg/core/container/info.go @@ -1,6 +1,7 @@ package container import ( + "context" "sync" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" @@ -19,7 +20,7 @@ type infoValue struct { } type InfoProvider interface { - Info(id cid.ID) (Info, error) + Info(ctx context.Context, id cid.ID) (Info, error) } type infoProvider struct { @@ -43,13 +44,13 @@ func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider { } } -func (r *infoProvider) Info(id cid.ID) (Info, error) { +func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { v, found := r.tryGetFromCache(id) if found { return v.info, v.err } - return r.getFromSource(id) + return r.getFromSource(ctx, id) } func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { @@ -60,7 +61,7 @@ func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { return value, found } -func (r *infoProvider) getFromSource(id cid.ID) (Info, error) { +func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { r.kl.Lock(id) defer r.kl.Unlock(id) @@ -75,11 +76,11 @@ func (r *infoProvider) getFromSource(id cid.ID) (Info, error) { return Info{}, r.sourceErr } - cnr, err := r.source.Get(id) + cnr, err := r.source.Get(ctx, id) var civ infoValue if err != nil { if client.IsErrContainerNotFound(err) { - removed, err := WasRemoved(r.source, id) + removed, err := WasRemoved(ctx, r.source, id) if err != nil { civ.err = err } else { diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go index ba4404546..4eb14e53c 100644 --- a/pkg/core/container/storage.go +++ b/pkg/core/container/storage.go @@ -1,6 +1,8 @@ package container import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -41,9 +43,9 @@ type Source interface { // // Implementations must not retain the container pointer and modify // the container through it. - Get(cid.ID) (*Container, error) + Get(ctx context.Context, cid cid.ID) (*Container, error) - DeletionInfo(cid.ID) (*DelInfo, error) + DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) } // EACL groups information about the FrostFS container's extended ACL stored in diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index d27556807..61c568052 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -1,6 +1,7 @@ package container import ( + "context" "errors" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -10,8 +11,8 @@ import ( // WasRemoved checks whether the container ever existed or // it just has not been created yet at the current epoch. -func WasRemoved(s Source, cid cid.ID) (bool, error) { - _, err := s.DeletionInfo(cid) +func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { + _, err := s.DeletionInfo(ctx, cid) if err == nil { return true, nil } @@ -25,10 +26,10 @@ func WasRemoved(s Source, cid cid.ID) (bool, error) { // IsIndexedContainer returns True if container attributes should be indexed. 
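`Addresses() iter.Seq[string]` leans on Go 1.23 range-over-func: callers range directly over the sequence instead of passing callbacks, and breaking out of the loop stops iteration. A self-contained sketch of an iterator with the same shape as `NetworkEndpoints()`; the endpoint strings are invented:

package main

import (
	"fmt"
	"iter"
)

// endpoints mimics NetworkEndpoints(): a push iterator that stops early
// when the consumer breaks out of the loop.
func endpoints(addrs ...string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, a := range addrs {
			if !yield(a) {
				return
			}
		}
	}
}

func main() {
	for a := range endpoints("/dns4/node1/tcp/8080", "/dns4/node2/tcp/8080") {
		fmt.Println(a)
	}
}

The deprecated IterateAddresses shim above is exactly this bridge in reverse: it ranges over the new iterator and honours the old "return true to stop" callback contract.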
func IsIndexedContainer(cnr containerSDK.Container) bool { var isS3Container bool - cnr.IterateAttributes(func(key, _ string) { + for key := range cnr.Attributes() { if key == ".s3-location-constraint" { isS3Container = true } - }) + } return !isS3Container } diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go index ecfd0eb15..e752043d3 100644 --- a/pkg/core/frostfsid/subject_provider.go +++ b/pkg/core/frostfsid/subject_provider.go @@ -1,6 +1,8 @@ package frostfsid import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -11,6 +13,6 @@ const ( // SubjectProvider interface provides methods to get subject from FrostfsID contract. type SubjectProvider interface { - GetSubject(util.Uint160) (*client.Subject, error) - GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error) + GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) + GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) } diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index b0c9e1f9e..e58e42634 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -1,6 +1,10 @@ package netmap -import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +import ( + "iter" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) // Node is a named type of netmap.NodeInfo which provides interface needed // in the current repository. Node is expected to be used everywhere instead @@ -14,10 +18,20 @@ func (x Node) PublicKey() []byte { return (netmap.NodeInfo)(x).PublicKey() } +// Addresses returns an iterator over all announced network addresses. +func (x Node) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() +} + // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. +// Deprecated: use [Node.Addresses] instead. func (x Node) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) + for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { + if f(s) { + return + } + } } // NumberOfAddresses returns number of announced network addresses. diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go index 7770c61c7..97313da84 100644 --- a/pkg/core/netmap/storage.go +++ b/pkg/core/netmap/storage.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -16,7 +18,7 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMap(diff uint64) (*netmap.NetMap, error) + GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) // GetNetMapByEpoch reads network map by the epoch number from the storage. // It returns the pointer to the requested network map and any error encountered. @@ -25,21 +27,21 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) // Epoch reads the current epoch from the storage. // It returns thw number of the current epoch and any error encountered. // // Must return exactly one non-default value. 
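From here on, the patch threads context.Context through previously context-free interfaces (netmap Source, container Source, SubjectProvider, InnerRing). What a caller gains, sketched with an invented deadline and assuming the usual `context`/`time` imports:

ctx, cancel := context.WithTimeout(parent, 3*time.Second) // illustrative timeout
defer cancel()

// Cancellation and tracing now propagate all the way into the sidechain
// RPC instead of stopping at the storage-layer boundary.
nm, err := netmap.GetLatestNetworkMap(ctx, src)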
- Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } // GetLatestNetworkMap requests and returns the latest network map from the storage. -func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) { - return src.GetNetMap(0) +func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { + return src.GetNetMap(ctx, 0) } // GetPreviousNetworkMap requests and returns previous from the latest network map from the storage. -func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) { - return src.GetNetMap(1) +func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { + return src.GetNetMap(ctx, 1) } diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 19b5d34e4..cf090eb37 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -199,7 +199,7 @@ func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSD cnrIDBin := make([]byte, sha256.Size) cnrID.Encode(cnrIDBin) - cnr, err := v.containers.Get(cnrID) + cnr, err := v.containers.Get(ctx, cnrID) if err != nil { return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) } diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 20560cf3a..dc336eb34 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -410,11 +411,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -483,12 +484,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -559,12 +560,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -578,7 +579,7 @@ type testIRSource struct { irNodes [][]byte } -func (s *testIRSource) InnerRingKeys() ([][]byte, error) { +func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) { return s.irNodes, nil } @@ -586,36 +587,13 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; 
found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } - -type testNetmapSource struct { - netmaps map[uint64]*netmap.NetMap - currentEpoch uint64 -} - -func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) { - if diff >= s.currentEpoch { - return nil, fmt.Errorf("invalid diff") - } - return s.GetNetMapByEpoch(s.currentEpoch - diff) -} - -func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.netmaps[epoch]; found { - return nm, nil - } - return nil, fmt.Errorf("netmap not found") -} - -func (s *testNetmapSource) Epoch() (uint64, error) { - return s.currentEpoch, nil -} diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go index 67c9a3188..aab12ebf9 100644 --- a/pkg/core/object/info.go +++ b/pkg/core/object/info.go @@ -13,6 +13,13 @@ type ECInfo struct { Total uint32 } +func (v *ECInfo) String() string { + if v == nil { + return "" + } + return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) +} + // Info groups object address with its FrostFS // object info. type Info struct { @@ -23,5 +30,5 @@ type Info struct { } func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject) + return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) } diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index 3b3650134..3733ed507 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -18,7 +18,7 @@ import ( ) type InnerRing interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type SenderClassifier struct { @@ -63,11 +63,11 @@ func (c SenderClassifier) Classify( } func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { - isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) + isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes) if err != nil { // do not throw error, try best case matching c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, - zap.String("error", err.Error())) + zap.Error(err)) } else if isInnerRingNode { return &ClassifyResult{ Role: acl.RoleInnerRing, @@ -78,13 +78,13 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK binCnr := make([]byte, sha256.Size) idCnr.Encode(binCnr) - isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr) + isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr) if err != nil { // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode, - zap.String("error", err.Error())) + zap.Error(err)) } else if isContainerNode { return &ClassifyResult{ Role: acl.RoleContainer, @@ -99,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK }, nil } -func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { - innerRingKeys, err := c.innerRing.InnerRingKeys() +func (c SenderClassifier) isInnerRingKey(ctx 
context.Context, owner []byte) (bool, error) { + innerRingKeys, err := c.innerRing.InnerRingKeys(ctx) if err != nil { return false, err } @@ -116,10 +116,11 @@ func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { } func (c SenderClassifier) isContainerKey( + ctx context.Context, owner, idCnr []byte, cnr container.Container, ) (bool, error) { - nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap + nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap if err != nil { return false, err } @@ -133,7 +134,7 @@ func (c SenderClassifier) isContainerKey( // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = core.GetPreviousNetworkMap(c.netmap) + nm, err = core.GetPreviousNetworkMap(ctx, c.netmap) if err != nil { return false, err } diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go index c4de07a5f..dfada764a 100644 --- a/pkg/innerring/bindings.go +++ b/pkg/innerring/bindings.go @@ -8,7 +8,6 @@ type ( // ContractProcessor interface defines functions for binding event producers // such as event.Listener and Timers with contract processor. ContractProcessor interface { - ListenerNotificationParsers() []event.NotificationParserInfo ListenerNotificationHandlers() []event.NotificationHandlerInfo ListenerNotaryParsers() []event.NotaryParserInfo ListenerNotaryHandlers() []event.NotaryHandlerInfo @@ -16,11 +15,6 @@ type ( ) func connectListenerWithProcessor(l event.Listener, p ContractProcessor) { - // register notification parsers - for _, parser := range p.ListenerNotificationParsers() { - l.SetNotificationParser(parser) - } - // register notification handlers for _, handler := range p.ListenerNotificationHandlers() { l.RegisterNotificationHandler(handler) diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go index 4a80ebf3b..7deec3f31 100644 --- a/pkg/innerring/fetcher.go +++ b/pkg/innerring/fetcher.go @@ -1,6 +1,8 @@ package innerring import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -47,12 +49,12 @@ type IrFetcherWithoutNotary struct { // InnerRingKeys fetches list of innerring keys from NeoFSAlphabet // role in the sidechain. -func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) { - return fN.cli.NeoFSAlphabetList() +func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { + return fN.cli.NeoFSAlphabetList(ctx) } // InnerRingKeys fetches list of innerring keys from netmap contract // in the sidechain. 
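The indexer below caches IR and alphabet indexes behind a refresh deadline. Its update() uses a classic double-checked RLock/Lock pattern; a self-contained sketch of the same idea, simplified to a single cached int:

package main

import (
	"fmt"
	"sync"
	"time"
)

// cache serves from the cached value while it is fresh, otherwise takes the
// write lock and re-checks the deadline so concurrent callers trigger only
// one refresh.
type cache struct {
	mu         sync.RWMutex
	lastAccess time.Time
	timeout    time.Duration
	value      int
}

func (c *cache) get(refresh func() (int, error)) (int, error) {
	c.mu.RLock()
	if time.Since(c.lastAccess) < c.timeout {
		v := c.value
		c.mu.RUnlock()
		return v, nil
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.lastAccess) < c.timeout { // lost the race: already refreshed
		return c.value, nil
	}
	v, err := refresh()
	if err != nil {
		return 0, err
	}
	c.value = v
	c.lastAccess = time.Now()
	return v, nil
}

func main() {
	c := &cache{timeout: time.Second}
	v, _ := c.get(func() (int, error) { return 42, nil })
	fmt.Println(v)
}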
-func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) { - return f.nm.GetInnerRingList() +func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { + return f.nm.GetInnerRingList(ctx) } diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go index 45135a57b..439400bac 100644 --- a/pkg/innerring/indexer.go +++ b/pkg/innerring/indexer.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sync" "time" @@ -10,7 +11,7 @@ import ( type ( irFetcher interface { - InnerRingKeys() (keys.PublicKeys, error) + InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) } committeeFetcher interface { @@ -45,7 +46,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK } } -func (s *innerRingIndexer) update() (ind indexes, err error) { +func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) { s.RLock() if time.Since(s.lastAccess) < s.timeout { @@ -62,7 +63,7 @@ func (s *innerRingIndexer) update() (ind indexes, err error) { return s.ind, nil } - innerRing, err := s.irFetcher.InnerRingKeys() + innerRing, err := s.irFetcher.InnerRingKeys(ctx) if err != nil { return indexes{}, err } @@ -81,8 +82,8 @@ func (s *innerRingIndexer) update() (ind indexes, err error) { return s.ind, nil } -func (s *innerRingIndexer) InnerRingIndex() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -90,8 +91,8 @@ func (s *innerRingIndexer) InnerRingIndex() (int32, error) { return ind.innerRingIndex, nil } -func (s *innerRingIndexer) InnerRingSize() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -99,8 +100,8 @@ func (s *innerRingIndexer) InnerRingSize() (int32, error) { return ind.innerRingSize, nil } -func (s *innerRingIndexer) AlphabetIndex() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go index c8a819b5b..f8201b7df 100644 --- a/pkg/innerring/indexer_test.go +++ b/pkg/innerring/indexer_test.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sync/atomic" "testing" @@ -37,15 +38,15 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(2), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(3), size, "invalid IR size") }) @@ -56,11 +57,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := 
indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(0), idx, "invalid IR index") }) @@ -71,11 +72,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") }) @@ -100,30 +101,30 @@ func TestIndexerCachesIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -132,15 +133,15 @@ func TestIndexerCachesIndexes(t *testing.T) { time.Sleep(2 * time.Second) - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -165,15 +166,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) 
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") @@ -189,15 +190,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer = newInnerRingIndexer(cf, irf, key, time.Second) - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") } @@ -219,7 +220,7 @@ type testIRFetcher struct { calls atomic.Int32 } -func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { f.calls.Add(1) return f.keys, f.err } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 25f4ff034..3d236641e 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -38,10 +38,7 @@ import ( func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, alphaSync event.Handler, ) error { - locodeValidator, err := s.newLocodeValidator(cfg) - if err != nil { - return err - } + locodeValidator := s.newLocodeValidator(cfg) netSettings := (*networkSettings)(s.netmapClient) @@ -51,8 +48,9 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, poolSize := cfg.GetInt("workers.netmap") s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) + var err error s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), @@ -100,7 +98,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain * fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err)) } mainnetChain.from = fromMainChainBlock @@ -161,7 +159,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli } else { // create governance processor governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, FrostFSClient: frostfsCli, AlphabetState: s, @@ -227,7 +225,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, @@ -249,7 +247,7 @@ func (s *Server) 
initContainerProcessor(ctx context.Context, cfg *viper.Viper, c s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, AlphabetState: s, @@ -270,7 +268,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, FrostFSClient: frostfsCli, @@ -293,7 +291,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, @@ -344,7 +342,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, controlsrv.WithAllowedKeys(authKeys), - ), log, audit) + ), log.WithTag(logger.TagGrpcSvc), audit) grpcControlSrv := grpc.NewServer() control.RegisterControlServiceServer(grpcControlSrv, controlSvc) @@ -380,7 +378,6 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { // form morph container client's options morphCnrOpts := make([]container.Option, 0, 3) morphCnrOpts = append(morphCnrOpts, - container.TryNotary(), container.AsAlphabet(), ) @@ -390,12 +387,12 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { } s.containerClient = result.CnrClient - s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet()) + s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet()) if err != nil { return nil, err } - s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet()) + s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet()) if err != nil { return nil, err } @@ -457,11 +454,11 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) } morphChain := &chainParams{ - log: s.log, + log: s.log.WithTag(logger.TagMorph), cfg: cfg, key: s.key, name: morphPrefix, diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 4fe9cc084..3a5137261 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { if err != nil { // we don't stop inner ring execution on this error s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators, - zap.String("error", err.Error())) + zap.Error(err)) } s.tickInitialExpoch(ctx) @@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) { for _, c := 
range s.closers { if err := c(); err != nil { s.log.Warn(ctx, logs.InnerringCloserError, - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan ) (*Server, error) { var err error server := &Server{ - log: log, + log: log.WithTag(logger.TagIr), irMetrics: metrics, cmode: cmode, } @@ -575,19 +575,19 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe func (s *Server) initConfigFromBlockchain(ctx context.Context) error { // get current epoch - epoch, err := s.netmapClient.Epoch() + epoch, err := s.netmapClient.Epoch(ctx) if err != nil { return fmt.Errorf("can't read epoch number: %w", err) } // get current epoch duration - epochDuration, err := s.netmapClient.EpochDuration() + epochDuration, err := s.netmapClient.EpochDuration(ctx) if err != nil { return fmt.Errorf("can't read epoch duration: %w", err) } // get balance precision - balancePrecision, err := s.balanceClient.Decimals() + balancePrecision, err := s.balanceClient.Decimals(ctx) if err != nil { return fmt.Errorf("can't read balance contract precision: %w", err) } @@ -597,7 +597,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { s.precision.SetBalancePrecision(balancePrecision) // get next epoch delta tick - s.initialEpochTickDelta, err = s.nextEpochBlockDelta() + s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx) if err != nil { return err } @@ -613,8 +613,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { return nil } -func (s *Server) nextEpochBlockDelta() (uint32, error) { - epochBlock, err := s.netmapClient.LastEpochBlock() +func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) { + epochBlock, err := s.netmapClient.LastEpochBlock(ctx) if err != nil { return 0, fmt.Errorf("can't read last epoch block: %w", err) } diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go index a0c3ea751..ae4c85168 100644 --- a/pkg/innerring/locode.go +++ b/pkg/innerring/locode.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/viper" ) -func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) { +func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { locodeDB := locodebolt.New(locodebolt.Prm{ Path: cfg.GetString("locode.db.path"), }, @@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, err return irlocode.New(irlocode.Prm{ DB: (*locodeBoltDBWrapper)(locodeDB), - }), nil + }) } type locodeBoltEntryWrapper struct { diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go index 9961710ca..fb11e9426 100644 --- a/pkg/innerring/netmap.go +++ b/pkg/innerring/netmap.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -17,8 +18,8 @@ type networkSettings netmapclient.Client // MaintenanceModeAllowed requests network configuration from the Sidechain // and check allowance of storage node's maintenance mode according to it. // Always returns state.ErrMaintenanceModeDisallowed. 
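The recurring mechanical change in the hunks above (Epoch, EpochDuration, Decimals, LastEpochBlock) and in MaintenanceModeAllowed just below is threading context.Context through previously parameterless read-only chain accessors. A minimal sketch of the pattern; chainReader and settings are hypothetical names standing in for the real morph clients:

package sketch

import (
	"context"
	"fmt"
	"time"
)

// chainReader stands in for the morph client interfaces touched by this
// patch; the accessor now takes a context instead of being parameterless.
type chainReader interface {
	Epoch(ctx context.Context) (uint64, error) // was: Epoch() (uint64, error)
}

type settings struct {
	r chainReader
}

// currentEpoch shows the call-site shape after the change: the caller's
// context (here with an illustrative timeout) flows into the RPC, so the
// read can be cancelled or deadlined from above.
func (s *settings) currentEpoch(ctx context.Context) (uint64, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	epoch, err := s.r.Epoch(ctx)
	if err != nil {
		return 0, fmt.Errorf("can't read epoch number: %w", err)
	}
	return epoch, nil
}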
-func (s *networkSettings) MaintenanceModeAllowed() error { - allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed() +func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error { + allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx) if err != nil { return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err) } else if allowed { diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index ac3e2a14d..1da3c401d 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -279,6 +279,6 @@ type testNetmapClient struct { netmap *netmap.NetMap } -func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 229261250..d3d0f83f2 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool { // there is no signature collecting, so we don't need extra fee _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod) if err != nil { - ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err)) return false } @@ -44,10 +44,10 @@ func (ap *Processor) processEmit(ctx context.Context) bool { return true } - networkMap, err := ap.netmapClient.NetMap() + networkMap, err := ap.netmapClient.NetMap(ctx) if err != nil { ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, - zap.String("error", err.Error())) + zap.Error(err)) return false } @@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey, - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net ap.log.Warn(ctx, logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 3992e00f3..0aea74003 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -36,7 +36,7 @@ type ( } netmapClient interface { - NetMap() (*netmap.NetMap, error) + NetMap(ctx context.Context) (*netmap.NetMap, error) } morphClient interface { @@ -114,11 +114,6 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) { ap.pwLock.Unlock() } -// ListenerNotificationParsers for the 'event.Listener' event producer. 
-func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index e2f649600..34203b74f 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -88,32 +88,16 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var parsers []event.NotificationParserInfo - - // new lock event - lock := event.NotificationParserInfo{} - lock.SetType(lockNotification) - lock.SetScriptHash(bp.balanceSC) - lock.SetParser(balanceEvent.ParseLock) - parsers = append(parsers, lock) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var handlers []event.NotificationHandlerInfo - - // lock handler - lock := event.NotificationHandlerInfo{} - lock.SetType(lockNotification) - lock.SetScriptHash(bp.balanceSC) - lock.SetHandler(bp.handleLock) - handlers = append(handlers, lock) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: bp.balanceSC, + Type: lockNotification, + Parser: balanceEvent.ParseLock, + Handlers: []event.Handler{bp.handleLock}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go index ba12ebb37..5334b9a1f 100644 --- a/pkg/innerring/processors/container/common.go +++ b/pkg/innerring/processors/container/common.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/ecdsa" "errors" "fmt" @@ -45,7 +46,7 @@ type signatureVerificationData struct { // - v.binPublicKey is a public session key // - session context corresponds to the container and verb in v // - session is "alive" -func (cp *Processor) verifySignature(v signatureVerificationData) error { +func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error { var err error var key frostfsecdsa.PublicKeyRFC6979 keyProvided := v.binPublicKey != nil @@ -58,7 +59,7 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error { } if len(v.binTokenSession) > 0 { - return cp.verifyByTokenSession(v, &key, keyProvided) + return cp.verifyByTokenSession(ctx, v, &key, keyProvided) } if keyProvided { @@ -77,8 +78,8 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error { return errors.New("signature is invalid or calculated with the key not bound to the container owner") } -func (cp *Processor) checkTokenLifetime(token session.Container) error { - curEpoch, err := cp.netState.Epoch() +func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error { + curEpoch, err := cp.netState.Epoch(ctx) if err != nil { return fmt.Errorf("could not read current epoch: %w", err) } @@ -90,7 +91,7 @@ func (cp *Processor) checkTokenLifetime(token session.Container) error { return nil } -func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { +func (cp *Processor) verifyByTokenSession(ctx 
context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { var tok session.Container err := tok.Unmarshal(v.binTokenSession) @@ -118,7 +119,7 @@ func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *fros return errors.New("owner differs with token owner") } - err = cp.checkTokenLifetime(tok) + err = cp.checkTokenLifetime(ctx, tok) if err != nil { return fmt.Errorf("check session lifetime: %w", err) } diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index f28e5372a..1b3842eb0 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -170,11 +170,11 @@ type testNetworkState struct { epoch uint64 } -func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) { +func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) { return s.homHashDisabled, nil } -func (s *testNetworkState) Epoch() (uint64, error) { +func (s *testNetworkState) Epoch(context.Context) (uint64, error) { return s.epoch, nil } @@ -187,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) { +func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { key := hex.EncodeToString(cid) if cont, found := c.get[key]; found { return cont, nil @@ -237,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction) type testFrostFSIDClient struct{} -func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { return &frostfsidclient.Subject{}, nil } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 16c450166..8e4ab2623 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -47,10 +47,10 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool e: put, } - err := cp.checkPutContainer(pctx) + err := cp.checkPutContainer(ctx, pctx) if err != nil { cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, - zap.String("error", err.Error()), + zap.Error(err), ) return false @@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil { cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer, - zap.String("error", err.Error()), + zap.Error(err), ) return false } @@ -66,8 +66,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool return true } -func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { - binCnr := ctx.e.Container() +func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error { + binCnr := pctx.e.Container() var cnr containerSDK.Container err := cnr.Unmarshal(binCnr) @@ -75,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { return fmt.Errorf("invalid binary container: %w", err) } - err = cp.verifySignature(signatureVerificationData{ + err = cp.verifySignature(ctx, signatureVerificationData{ ownerContainer: 
cnr.Owner(), verb: session.VerbContainerPut, - binTokenSession: ctx.e.SessionToken(), - binPublicKey: ctx.e.PublicKey(), - signature: ctx.e.Signature(), + binTokenSession: pctx.e.SessionToken(), + binPublicKey: pctx.e.PublicKey(), + signature: pctx.e.Signature(), signedData: binCnr, }) if err != nil { @@ -88,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { } // check homomorphic hashing setting - err = checkHomomorphicHashing(cp.netState, cnr) + err = checkHomomorphicHashing(ctx, cp.netState, cnr) if err != nil { return fmt.Errorf("incorrect homomorphic hashing setting: %w", err) } // check native name and zone - err = cp.checkNNS(ctx, cnr) + err = cp.checkNNS(ctx, pctx, cnr) if err != nil { return fmt.Errorf("NNS: %w", err) } @@ -110,10 +110,10 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven return true } - err := cp.checkDeleteContainer(e) + err := cp.checkDeleteContainer(ctx, e) if err != nil { cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, - zap.String("error", err.Error()), + zap.Error(err), ) return false @@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer, - zap.String("error", err.Error()), + zap.Error(err), ) return false @@ -130,7 +130,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven return true } -func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { +func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error { binCnr := e.ContainerID() var idCnr cid.ID @@ -141,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { } // receive owner of the related container - cnr, err := cp.cnrClient.Get(binCnr) + cnr, err := cp.cnrClient.Get(ctx, binCnr) if err != nil { return fmt.Errorf("could not receive the container: %w", err) } - err = cp.verifySignature(signatureVerificationData{ + err = cp.verifySignature(ctx, signatureVerificationData{ ownerContainer: cnr.Value.Owner(), verb: session.VerbContainerDelete, idContainerSet: true, @@ -163,21 +163,21 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { return nil } -func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error { +func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error { // fetch domain info - ctx.d = containerSDK.ReadDomain(cnr) + pctx.d = containerSDK.ReadDomain(cnr) // if PutNamed event => check if values in container correspond to args - if named, ok := ctx.e.(interface { + if named, ok := pctx.e.(interface { Name() string Zone() string }); ok { - if name := named.Name(); name != ctx.d.Name() { - return fmt.Errorf("names differ %s/%s", name, ctx.d.Name()) + if name := named.Name(); name != pctx.d.Name() { + return fmt.Errorf("names differ %s/%s", name, pctx.d.Name()) } - if zone := named.Zone(); zone != ctx.d.Zone() { - return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone()) + if zone := named.Zone(); zone != pctx.d.Zone() { + return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone()) } } @@ -186,12 +186,12 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain return fmt.Errorf("could not get container owner address: %w", err) } - subject, err := 
cp.frostFSIDClient.GetSubject(addr) + subject, err := cp.frostFSIDClient.GetSubject(ctx, addr) if err != nil { return fmt.Errorf("could not get subject from FrostfsID contract: %w", err) } - namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns") + namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns") if !hasNamespace { return nil } @@ -203,13 +203,13 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain return nil } -func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error { - netSetting, err := ns.HomomorphicHashDisabled() +func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error { + netSetting, err := ns.HomomorphicHashDisabled(ctx) if err != nil { return fmt.Errorf("could not get setting in contract: %w", err) } - if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting { + if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting { return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting) } diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 58b90457c..9be93baa4 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -25,7 +25,7 @@ type ( ContClient interface { ContractAddress() util.Uint160 - Get(cid []byte) (*containercore.Container, error) + Get(ctx context.Context, cid []byte) (*containercore.Container, error) } MorphClient interface { @@ -33,7 +33,7 @@ type ( } FrostFSIDClient interface { - GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) + GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) } // Processor of events produced by container contract in the sidechain. @@ -68,7 +68,7 @@ type NetworkState interface { // // Must return any error encountered // which did not allow reading the value. - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) // HomomorphicHashDisabled must return boolean that // represents homomorphic network state: @@ -76,7 +76,7 @@ type NetworkState interface { // * false if hashing is enabled. // // which did not allow reading the value. - HomomorphicHashDisabled() (bool, error) + HomomorphicHashDisabled(ctx context.Context) (bool, error) } // New creates a container contract processor instance. @@ -118,11 +118,6 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
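One change in the container hunks above is semantic rather than mechanical: checkHomomorphicHashing no longer demands that the container flag equal the network flag; it only rejects containers that keep hashing enabled on a network that has disabled it. A standalone sketch of the new predicate, with the truth table in comments:

package sketch

// homomorphicHashingOK captures the relaxed check from checkPutContainer:
// netDisabled is the network-wide "homomorphic hashing disabled" setting,
// cnrDisabled the container's own flag.
//
//	netDisabled  cnrDisabled   old (equality)  new
//	false        false         ok              ok
//	false        true          reject          ok      <- now allowed
//	true         false         reject          reject
//	true         true          ok              ok
func homomorphicHashingOK(netDisabled, cnrDisabled bool) bool {
	// old predicate: netDisabled == cnrDisabled
	return !(netDisabled && !cnrDisabled)
}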
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index ee824ea31..d10eb9660 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver, - zap.String("error", err.Error())) + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index 6c29d330d..9d3bf65cd 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -142,70 +142,34 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var ( - parsers = make([]event.NotificationParserInfo, 0, 6) - - p event.NotificationParserInfo - ) - - p.SetScriptHash(np.frostfsContract) - - // deposit event - p.SetType(event.TypeFromString(depositNotification)) - p.SetParser(frostfsEvent.ParseDeposit) - parsers = append(parsers, p) - - // withdraw event - p.SetType(event.TypeFromString(withdrawNotification)) - p.SetParser(frostfsEvent.ParseWithdraw) - parsers = append(parsers, p) - - // cheque event - p.SetType(event.TypeFromString(chequeNotification)) - p.SetParser(frostfsEvent.ParseCheque) - parsers = append(parsers, p) - - // config event - p.SetType(event.TypeFromString(configNotification)) - p.SetParser(frostfsEvent.ParseConfig) - parsers = append(parsers, p) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var ( - handlers = make([]event.NotificationHandlerInfo, 0, 6) - - h event.NotificationHandlerInfo - ) - - h.SetScriptHash(np.frostfsContract) - - // deposit handler - h.SetType(event.TypeFromString(depositNotification)) - h.SetHandler(np.handleDeposit) - handlers = append(handlers, h) - - // withdraw handler - h.SetType(event.TypeFromString(withdrawNotification)) - h.SetHandler(np.handleWithdraw) - handlers = append(handlers, h) - - // cheque handler - h.SetType(event.TypeFromString(chequeNotification)) - h.SetHandler(np.handleCheque) - handlers = append(handlers, h) - - // config handler - h.SetType(event.TypeFromString(configNotification)) - h.SetHandler(np.handleConfig) - handlers = append(handlers, h) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: np.frostfsContract, + Type: event.TypeFromString(depositNotification), + Parser: frostfsEvent.ParseDeposit, + Handlers: []event.Handler{np.handleDeposit}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(withdrawNotification), + Parser: frostfsEvent.ParseWithdraw, + Handlers: []event.Handler{np.handleWithdraw}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(chequeNotification), + Parser: frostfsEvent.ParseCheque, + Handlers: []event.Handler{np.handleCheque}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(configNotification), + Parser: frostfsEvent.ParseConfig, + Handlers: []event.Handler{np.handleConfig}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 5a6126249..864c5da67 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -236,7 +236,7 @@ type testIRFetcher struct { publicKeys keys.PublicKeys } -func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { return f.publicKeys, nil } @@ -266,7 +266,7 @@ type testMainnetClient struct { designateHash util.Uint160 } -func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) { +func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) { return c.alphabetKeys, nil } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 73d21a7d2..6e22abb3c 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -25,24 +25,24 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 return true } - mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() + mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx) if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, - zap.String("error", err.Error())) + zap.Error(err)) return false } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain, - zap.String("error", err.Error())) + zap.Error(err)) return false } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, - zap.String("error", err.Error())) + 
zap.Error(err)) return false } @@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 err = gp.voter.VoteForSidechainValidator(ctx, votePrm) if err != nil { gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee, - zap.String("error", err.Error())) + zap.Error(err)) } // 2. Update NeoFSAlphabet role in the sidechain. @@ -95,17 +95,17 @@ func prettyKeys(keys keys.PublicKeys) string { } func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { - innerRing, err := gp.irFetcher.InnerRingKeys() + innerRing, err := gp.irFetcher.InnerRingKeys(ctx) if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, - zap.String("error", err.Error())) + zap.Error(err)) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil { gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, - zap.String("error", err.Error())) + zap.Error(err)) } } @@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe err := gp.morphClient.UpdateNotaryList(ctx, updPrm) if err != nil { gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, - zap.String("error", err.Error())) + zap.Error(err)) } } @@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph err := gp.frostfsClient.AlphabetUpdate(ctx, prm) if err != nil { gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, - zap.String("error", err.Error())) + zap.Error(err)) } } diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 565f4c27d..2d131edda 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -52,7 +52,7 @@ type ( // Implementation must take into account availability of // the notary contract. IRFetcher interface { - InnerRingKeys() (keys.PublicKeys, error) + InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) } FrostFSClient interface { @@ -64,7 +64,7 @@ type ( } MainnetClient interface { - NeoFSAlphabetList() (res keys.PublicKeys, err error) + NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) GetDesignateHash() util.Uint160 } @@ -155,22 +155,16 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var pi event.NotificationParserInfo - pi.SetScriptHash(gp.designate) - pi.SetType(event.TypeFromString(native.DesignationEventName)) - pi.SetParser(rolemanagement.ParseDesignate) - return []event.NotificationParserInfo{pi} -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
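The rewrite just below, like its twins in the balance, frostfs and netmap processors above, converges on one shape: the nil-returning ListenerNotificationParsers methods are deleted and NotificationHandlerInfo becomes a plain struct bundling contract, event type, parser and handlers. A self-contained sketch of that shape; the type aliases are local stand-ins for the real event and util package types:

package sketch

import "context"

type (
	Uint160            [20]byte // stand-in for util.Uint160
	Type               string   // stand-in for event.Type
	Event              any
	NotificationParser func(raw []byte) (Event, error)
	Handler            func(ctx context.Context, e Event)
)

// NotificationHandlerInfo mirrors the field set used in the diff: one
// value per subscription instead of paired parser- and handler-info
// values built with four setter calls each.
type NotificationHandlerInfo struct {
	Contract Uint160
	Type     Type
	Parser   NotificationParser
	Handlers []Handler
}

func subscriptions(lockContract Uint160, parseLock NotificationParser, handleLock Handler) []NotificationHandlerInfo {
	return []NotificationHandlerInfo{
		{
			Contract: lockContract,
			Type:     Type("Lock"),
			Parser:   parseLock,
			Handlers: []Handler{handleLock},
		},
	}
}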
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var hi event.NotificationHandlerInfo - hi.SetScriptHash(gp.designate) - hi.SetType(event.TypeFromString(native.DesignationEventName)) - hi.SetHandler(gp.HandleAlphabetSync) - return []event.NotificationHandlerInfo{hi} + return []event.NotificationHandlerInfo{ + { + Contract: gp.designate, + Type: event.TypeFromString(native.DesignationEventName), + Parser: rolemanagement.ParseDesignate, + Handlers: []event.Handler{gp.HandleAlphabetSync}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 5a5adfb2d..934c3790d 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -294,7 +294,7 @@ type testNodeStateSettings struct { maintAllowed bool } -func (s *testNodeStateSettings) MaintenanceModeAllowed() error { +func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error { if s.maintAllowed { return nil } @@ -303,7 +303,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed() error { type testValidator struct{} -func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error { +func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error { return nil } @@ -381,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testNetmapClient) EpochDuration() (uint64, error) { +func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) { return c.epochDuration, nil } @@ -392,7 +392,7 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) { return 0, fmt.Errorf("not found") } -func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go index 5e0558344..b81dc9989 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go @@ -1,6 +1,7 @@ package locode import ( + "context" "errors" "fmt" @@ -29,7 +30,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record // - Continent: R.Continent().String(). // // UN-LOCODE attribute remains untouched. 
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { attrLocode := n.LOCODE() if attrLocode == "" { return nil diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go index 8ab174dfd..fa2dd1ac1 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go @@ -1,6 +1,7 @@ package locode_test import ( + "context" "errors" "fmt" "testing" @@ -92,7 +93,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { t.Run("w/o locode", func(t *testing.T) { n := nodeInfoWithSomeAttrs() - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.NoError(t, err) }) @@ -102,7 +103,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttrValue(n, "WRONG LOCODE") - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.Error(t, err) }) @@ -111,7 +112,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"}) - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.Error(t, err) }) @@ -119,7 +120,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, r.LOCODE) - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.NoError(t, err) require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode")) diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go index 126f36582..0e4628ac7 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go @@ -1,6 +1,7 @@ package maddress import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -8,7 +9,7 @@ import ( ) // VerifyAndUpdate calls network.VerifyAddress. -func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { err := network.VerifyMultiAddress(*n) if err != nil { return fmt.Errorf("could not verify multiaddress: %w", err) diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go index e5165f618..03c41a451 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go @@ -7,6 +7,7 @@ map candidates. package state import ( + "context" "errors" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -23,7 +24,7 @@ type NetworkSettings interface { // no error if allowed; // ErrMaintenanceModeDisallowed if disallowed; // other error if there are any problems with the check. - MaintenanceModeAllowed() error + MaintenanceModeAllowed(ctx context.Context) error } // NetMapCandidateValidator represents tool which checks state of nodes which @@ -55,13 +56,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting // MUST NOT be called before SetNetworkSettings. // // See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods. 
-func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error { +func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error { if node.Status().IsOnline() { return nil } if node.Status().IsMaintenance() { - return x.netSettings.MaintenanceModeAllowed() + return x.netSettings.MaintenanceModeAllowed(ctx) } return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE") diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go index b81d7243b..cbf48a710 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go @@ -1,6 +1,7 @@ package state_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -13,7 +14,7 @@ type testNetworkSettings struct { disallowed bool } -func (x testNetworkSettings) MaintenanceModeAllowed() error { +func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error { if x.disallowed { return state.ErrMaintenanceModeDisallowed } @@ -81,7 +82,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { testCase.validatorPreparer(&v) } - err := v.VerifyAndUpdate(&node) + err := v.VerifyAndUpdate(context.Background(), &node) if testCase.valid { require.NoError(t, err, testCase.name) diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go index e9b24e024..3dbe98a8d 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go @@ -1,6 +1,8 @@ package nodevalidation import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -26,9 +28,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator { // VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators. // // If error appears, returns it immediately. 
-func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error { +func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error { for _, v := range c.validators { - if err := v.VerifyAndUpdate(ni); err != nil { + if err := v.VerifyAndUpdate(ctx, ni); err != nil { return err } } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index a43005ffb..8f8cc17ff 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea }) if err != nil { np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache, - zap.String("error", err.Error())) + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 237c4e512..7c78d24a5 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -14,10 +14,10 @@ import ( func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool { epoch := ev.EpochNumber() - epochDuration, err := np.netmapClient.EpochDuration() + epochDuration, err := np.netmapClient.EpochDuration(ctx) if err != nil { np.log.Warn(ctx, logs.NetmapCantGetEpochDuration, - zap.String("error", err.Error())) + zap.Error(err)) } else { np.epochState.SetEpochDuration(epochDuration) } @@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc if err != nil { np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), - zap.String("error", err.Error())) + zap.Error(err)) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { np.log.Warn(ctx, logs.NetmapCantResetEpochTimer, - zap.String("error", err.Error())) + zap.Error(err)) } // get new netmap snapshot - networkMap, err := np.netmapClient.NetMap() + networkMap, err := np.netmapClient.NetMap(ctx) if err != nil { np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup, - zap.String("error", err.Error())) + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 72aa08f76..b5c727cc7 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -39,10 +39,10 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) } // validate and update node info - err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) + err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo) if err != nil { np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, - zap.String("error", err.Error()), + zap.Error(err), ) return false @@ -108,7 +108,7 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat var err error if ev.Maintenance() { - err = np.nodeStateSettings.MaintenanceModeAllowed() + err = np.nodeStateSettings.MaintenanceModeAllowed(ctx) if err != nil { np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index b3d57e85b..277bca1c3 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -49,15 +49,15 @@ type ( // // If no error 
occurs, the parameter must point to the // ready-made NodeInfo structure. - VerifyAndUpdate(*netmap.NodeInfo) error + VerifyAndUpdate(context.Context, *netmap.NodeInfo) error } Client interface { MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error ContractAddress() util.Uint160 - EpochDuration() (uint64, error) + EpochDuration(ctx context.Context) (uint64, error) MorphTxHeight(h util.Uint256) (res uint32, err error) - NetMap() (*netmap.NetMap, error) + NetMap(ctx context.Context) (*netmap.NetMap, error) NewEpoch(ctx context.Context, epoch uint64) error MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error @@ -161,36 +161,16 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - parsers := make([]event.NotificationParserInfo, 0, 3) - - var p event.NotificationParserInfo - - p.SetScriptHash(np.netmapClient.ContractAddress()) - - // new epoch event - p.SetType(newEpochNotification) - p.SetParser(netmapEvent.ParseNewEpoch) - parsers = append(parsers, p) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - handlers := make([]event.NotificationHandlerInfo, 0, 3) - - var i event.NotificationHandlerInfo - - i.SetScriptHash(np.netmapClient.ContractAddress()) - - // new epoch handler - i.SetType(newEpochNotification) - i.SetHandler(np.handleNewEpoch) - handlers = append(handlers, i) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: np.netmapClient.ContractAddress(), + Type: newEpochNotification, + Parser: netmapEvent.ParseNewEpoch, + Handlers: []event.Handler{np.handleNewEpoch}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go index 9cd71ae48..310f12248 100644 --- a/pkg/innerring/processors/netmap/wrappers.go +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -34,16 +34,16 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 { return w.netmapClient.ContractAddress() } -func (w *netmapClientWrapper) EpochDuration() (uint64, error) { - return w.netmapClient.EpochDuration() +func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) { + return w.netmapClient.EpochDuration(ctx) } func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) { return w.netmapClient.Morph().TxHeight(h) } -func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { - return w.netmapClient.NetMap() +func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) { + return w.netmapClient.NetMap(ctx) } func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 77c2af2ce..0ef771359 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -60,9 +60,9 @@ func (s *Server) IsAlphabet(ctx context.Context) bool { // InnerRingIndex is a getter for a global index of node in inner ring list. Negative // index means that node is not in the inner ring list. 
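Alongside the context threading, the patch mechanically swaps zap.String("error", err.Error()) for zap.Error(err), as in the getters just below. A short sketch of why the latter is preferable:

package sketch

import "go.uber.org/zap"

func logStyles(log *zap.Logger, err error) {
	// Old style: manual key, string-only value, panics if err is nil.
	log.Warn("operation failed", zap.String("error", err.Error()))
	// New style: conventional "error" key, nil-safe (a nil error becomes a
	// no-op field), and encoders may surface richer detail for errors that
	// support verbose formatting.
	log.Warn("operation failed", zap.Error(err))
}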
func (s *Server) InnerRingIndex(ctx context.Context) int { - index, err := s.statusIndex.InnerRingIndex() + index, err := s.statusIndex.InnerRingIndex(ctx) if err != nil { - s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err)) return -1 } @@ -72,9 +72,9 @@ func (s *Server) InnerRingIndex(ctx context.Context) int { // InnerRingSize is a getter for a global size of inner ring list. This value // paired with inner ring index. func (s *Server) InnerRingSize(ctx context.Context) int { - size, err := s.statusIndex.InnerRingSize() + size, err := s.statusIndex.InnerRingSize(ctx) if err != nil { - s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err)) return 0 } @@ -84,9 +84,9 @@ func (s *Server) InnerRingSize(ctx context.Context) int { // AlphabetIndex is a getter for a global index of node in alphabet list. // Negative index means that node is not in the alphabet list. func (s *Server) AlphabetIndex(ctx context.Context) int { - index, err := s.statusIndex.AlphabetIndex() + index, err := s.statusIndex.AlphabetIndex(ctx) if err != nil { - s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err)) return -1 } @@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index 08ef8b86c..a6c40f9fa 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // WithLogger returns an option to specify Blobovnicza's logger. 
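The WithLogger change just below drops the per-component zap field because components are now labelled upstream with WithTag (TagProcessor, TagMorph, TagGrpcSvc and TagIr appear in the hunks above). A sketch of what such a WithTag might look like; the implementation and the tag string values are assumptions, only the call shape comes from the diff:

package sketch

import "go.uber.org/zap"

type Tag string

// Tags as used in the hunks above; the string values are guesses.
const (
	TagIr        Tag = "ir"
	TagProcessor Tag = "processor"
	TagMorph     Tag = "morph"
	TagGrpcSvc   Tag = "grpcsvc"
)

type Logger struct{ z *zap.Logger }

// WithTag derives a child logger labelled once at wiring time, so a
// component (e.g. Blobovnicza) no longer stamps a "component" field in
// its own constructor option.
func (l *Logger) WithTag(t Tag) *Logger {
	return &Logger{z: l.z.With(zap.String("tag", string(t)))}
}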
func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Blobovnicza")) + c.log = l } } diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index d0e71a876..4947512cc 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error { }) }) if err != nil { - return fmt.Errorf("can't determine DB size: %w", err) + return fmt.Errorf("determine DB size: %w", err) } if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) @@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error { return saveItemsCount(tx, items) }); err != nil { b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) - return fmt.Errorf("can't save blobovnicza's size and items count: %w", err) + return fmt.Errorf("save blobovnicza's size and items count: %w", err) } b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) } diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index d821b2991..8f24b5675 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -6,7 +6,6 @@ import ( "syscall" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -94,7 +93,6 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(dataSize)), zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) b.itemDeleted(recordSize) } diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go index 01e5529da..cd33b263c 100644 --- a/pkg/local_object_storage/blobovnicza/iterate.go +++ b/pkg/local_object_storage/blobovnicza/iterate.go @@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes, if prm.ignoreErrors { return nil } - return fmt.Errorf("could not decode address key: %w", err) + return fmt.Errorf("decode address key: %w", err) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index d9e99d0d1..3e8b9f07b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -158,11 +158,11 @@ func (b *Blobovniczas) Path() string { } // SetCompressor implements common.Storage. 
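The error-message rewrites above ("can't determine DB size" becomes "determine DB size") follow the convention that a %w chain already reads as a sequence of failed steps, so verb prefixes like "can't" or "could not" only stack up noise. A small sketch:

package sketch

import (
	"errors"
	"fmt"
)

func openDB() error { return errors.New("invalid database") }

func initCounters() error {
	if err := openDB(); err != nil {
		// was: fmt.Errorf("can't determine DB size: %w", err)
		return fmt.Errorf("determine DB size: %w", err)
	}
	return nil
}

// A caller wrapping once more, fmt.Errorf("init blobovnicza: %w", err),
// yields "init blobovnicza: determine DB size: invalid database":
// one step per layer, no stacked "can't"s.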
-func (b *Blobovniczas) SetCompressor(cc *compression.Config) { +func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { b.compression = cc } -func (b *Blobovniczas) Compressor() *compression.Config { +func (b *Blobovniczas) Compressor() *compression.Compressor { return b.compression } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index ec9743b57..f87f4a144 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -19,7 +19,8 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { st := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(10), WithBlobovniczaShallowDepth(1), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index c77df63bf..a6c1ce368 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -41,35 +41,34 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - eg.SetLimit(b.blzInitWorkerCount) - err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { - eg.Go(func() error { - p = strings.TrimSuffix(p, rebuildSuffix) - shBlz := b.getBlobovniczaWithoutCaching(p) - blz, err := shBlz.Open(egCtx) - if err != nil { - return err - } - defer shBlz.Close(egCtx) - - moveInfo, err := blz.ListMoveInfo(egCtx) - if err != nil { - return err - } - for _, move := range moveInfo { - b.deleteProtectedObjects.Add(move.Address) - } - - b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) - return nil - }) - return false, nil - }) - if err != nil { - _ = eg.Wait() - return err + if b.blzInitWorkerCount > 0 { + eg.SetLimit(b.blzInitWorkerCount + 1) } + eg.Go(func() error { + return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { + eg.Go(func() error { + p = strings.TrimSuffix(p, rebuildSuffix) + shBlz := b.getBlobovniczaWithoutCaching(p) + blz, err := shBlz.Open(egCtx) + if err != nil { + return err + } + defer shBlz.Close(egCtx) + moveInfo, err := blz.ListMoveInfo(egCtx) + if err != nil { + return err + } + for _, move := range moveInfo { + b.deleteProtectedObjects.Add(move.Address) + } + + b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) + return nil + }) + return false, nil + }) + }) return eg.Wait() } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go index b26323bd0..7db1891f9 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go @@ -2,6 +2,9 @@ package blobovniczatree import ( "context" + "os" + "path" + "strconv" "testing" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -129,3 +132,34 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, blz.Close(context.Background())) } + +func TestInitBlobovniczasInitErrorType(t *testing.T) { + t.Parallel() + + rootDir := t.TempDir() + + for idx 
:= 0; idx < 10; idx++ { + f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db")) + require.NoError(t, err) + _, err = f.Write([]byte("invalid db")) + require.NoError(t, err) + require.NoError(t, f.Close()) + + f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix)) + require.NoError(t, err) + require.NoError(t, f.Close()) + } + + blz := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaShallowDepth(1), + WithBlobovniczaShallowWidth(1), + WithRootPath(rootDir), + ) + + require.NoError(t, blz.Open(mode.ComponentReadWrite)) + err := blz.Init() + require.Contains(t, err.Error(), "open blobovnicza") + require.Contains(t, err.Error(), "invalid database") + require.NoError(t, blz.Close(context.Background())) +} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 8c2d7aa67..d096791c3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -19,7 +18,10 @@ import ( "go.uber.org/zap" ) -var errObjectIsDeleteProtected = errors.New("object is delete protected") +var ( + errObjectIsDeleteProtected = errors.New("object is delete protected") + deleteRes = common.DeleteRes{} +) // Delete deletes object from blobovnicza tree. 
 //
@@ -43,17 +45,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
 	defer span.End()
 
 	if b.readOnly {
-		return common.DeleteRes{}, common.ErrReadOnly
+		return deleteRes, common.ErrReadOnly
 	}
 
 	if b.rebuildGuard.TryRLock() {
 		defer b.rebuildGuard.RUnlock()
 	} else {
-		return common.DeleteRes{}, errRebuildInProgress
+		return deleteRes, errRebuildInProgress
 	}
 
 	if b.deleteProtectedObjects.Contains(prm.Address) {
-		return common.DeleteRes{}, errObjectIsDeleteProtected
+		return deleteRes, errObjectIsDeleteProtected
 	}
 
 	var bPrm blobovnicza.DeletePrm
@@ -82,8 +84,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
 			if !client.IsErrObjectNotFound(err) {
 				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
 					zap.String("level", p),
-					zap.String("error", err.Error()),
-					zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+					zap.Error(err),
 				)
 			}
 		}
@@ -98,7 +99,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
 
 	if err == nil && !objectFound {
 		// not found in any blobovnicza
-		return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+		return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound))
 	}
 
 	success = err == nil
@@ -112,7 +113,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
 	shBlz := b.getBlobovnicza(ctx, blzPath)
 	blz, err := shBlz.Open(ctx)
 	if err != nil {
-		return common.DeleteRes{}, err
+		return deleteRes, err
 	}
 	defer shBlz.Close(ctx)
 
@@ -122,5 +123,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz
 // removes object from blobovnicza and returns common.DeleteRes.
 func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) {
 	_, err := blz.Delete(ctx, prm)
-	return common.DeleteRes{}, err
+	return deleteRes, err
 }
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index 63d2f21e1..0c5e48821 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -8,7 +8,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"go.opentelemetry.io/otel/attribute"
@@ -57,8 +56,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
 			if !client.IsErrObjectNotFound(err) {
 				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
-					zap.String("error", err.Error()),
-					zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+					zap.Error(err))
 			}
 		}
 
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index 5414140f0..df2b4ffe5 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -19,7 +19,8 @@ func TestExistsInvalidStorageID(t *testing.T) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(1024),
 		WithBlobovniczaShallowWidth(2),
 		WithBlobovniczaShallowDepth(2),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
index d390ecf1d..9244d765c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go
@@ -15,7 +15,8 @@ func TestGeneric(t *testing.T) {
 	helper := func(t *testing.T, dir string) common.Storage {
 		return NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(maxObjectSize),
 			WithBlobovniczaShallowWidth(2),
 			WithBlobovniczaShallowDepth(2),
@@ -43,7 +44,8 @@ func TestControl(t *testing.T) {
 	newTree := func(t *testing.T) common.Storage {
 		return NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(maxObjectSize),
 			WithBlobovniczaShallowWidth(2),
 			WithBlobovniczaShallowDepth(2),
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index b7ef8d8a5..e5c83e5f2 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -10,7 +10,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -69,8 +68,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
 			if !client.IsErrObjectNotFound(err) {
 				b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
-					zap.String("error", err.Error()),
-					zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
+					zap.Error(err),
 				)
 			}
 		}
@@ -115,13 +113,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
 	// decompress the data
 	data, err := b.compression.Decompress(res.Object())
 	if err != nil {
-		return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+		return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
 	}
 
 	// unmarshal the object
 	obj := objectSDK.New()
 	if err := obj.Unmarshal(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+		return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
 	}
 
 	return common.GetRes{Object: obj, RawData: data}, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index b24f1b881..27d13f4f3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -11,7 +11,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -71,8 +70,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if !outOfBounds && !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } if outOfBounds { return true, err @@ -130,13 +128,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob // decompress the data data, err := b.compression.Decompress(res.Object()) if err != nil { - return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err) } // unmarshal the object obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err) } from := prm.Range.GetOffset() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index b120c22f7..ceb8fb7e3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -44,12 +44,12 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm if prm.IgnoreErrors { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", elem.Address()), - zap.String("err", err.Error()), + zap.Error(err), zap.String("storage_id", p), zap.String("root_path", b.rootPath)) return nil } - return fmt.Errorf("could not decompress object data: %w", err) + return fmt.Errorf("decompress object data: %w", err) } if prm.Handler != nil { @@ -77,12 +77,12 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo if err != nil { if ignoreErrors { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.String("err", err.Error()), + zap.Error(err), zap.String("storage_id", p), zap.String("root_path", b.rootPath)) return false, nil } - return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err) + return false, fmt.Errorf("open blobovnicza %s: %w", p, err) } defer shBlz.Close(ctx) @@ -249,6 +249,12 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres } func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) { + select { + case <-ctx.Done(): + return false, ctx.Err() + default: + } + sysPath := filepath.Join(b.rootPath, path) entries, err := os.ReadDir(sysPath) if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index b35e052cf..6438f715b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) 
@@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
 		)...)
 
 	if err := blz.Open(ctx); err != nil {
-		return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
+		return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
 	}
 	if err := blz.Init(ctx); err != nil {
-		return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
+		return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
 	}
 
 	b.refCount++
@@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) {
 		if err := b.blcza.Close(ctx); err != nil {
 			b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 				zap.String("id", b.path),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 		b.blcza = nil
@@ -125,9 +125,9 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
 	if err := b.blcza.Close(ctx); err != nil {
 		b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 			zap.String("id", b.path),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
-		return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
+		return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
 	}
 
 	b.refCount = 0
@@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string {
 	return b.path
 }
 
-// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
-type levelDbManager struct {
+// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
+type levelDBManager struct {
 	dbMtx     *sync.RWMutex
 	databases map[uint64]*sharedDB
 
@@ -157,8 +157,8 @@ type levelDbManager struct {
 func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
 	readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
-) *levelDbManager {
-	result := &levelDbManager{
+) *levelDBManager {
+	result := &levelDBManager{
 		databases: make(map[uint64]*sharedDB),
 		dbMtx:     &sync.RWMutex{},
 
@@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st
 	return result
 }
 
-func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
+func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
 	res := m.getDBIfExists(idx)
 	if res != nil {
 		return res
@@ -181,14 +181,14 @@ func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
 	return m.getOrCreateDB(idx)
 }
 
-func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB {
+func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
 	m.dbMtx.RLock()
 	defer m.dbMtx.RUnlock()
 
 	return m.databases[idx]
 }
 
-func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
+func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
 	m.dbMtx.Lock()
 	defer m.dbMtx.Unlock()
 
@@ -202,7 +202,7 @@ func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
 	return db
 }
 
-func (m *levelDbManager) hasAnyDB() bool {
+func (m *levelDBManager) hasAnyDB() bool {
 	m.dbMtx.RLock()
 	defer m.dbMtx.RUnlock()
 
@@ -213,7 +213,7 @@ func (m *levelDbManager) hasAnyDB() bool {
 //
 // The blobovnicza opens at the first request, closes after the last request.
 type dbManager struct {
-	levelToManager      map[string]*levelDbManager
+	levelToManager      map[string]*levelDBManager
 	levelToManagerGuard *sync.RWMutex
 	closedFlag          *atomic.Bool
 	dbCounter           *openDBCounter
@@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool,
 		options:             options,
 		readOnly:            readOnly,
 		metrics:             metrics,
-		levelToManager:      make(map[string]*levelDbManager),
+		levelToManager:      make(map[string]*levelDBManager),
 		levelToManagerGuard: &sync.RWMutex{},
 		log:                 log,
 		closedFlag:          &atomic.Bool{},
@@ -266,7 +266,7 @@ func (m *dbManager) Close() {
 	m.dbCounter.WaitUntilAllClosed()
 }
 
-func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
+func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
 	result := m.getLevelManagerIfExists(lvlPath)
 	if result != nil {
 		return result
@@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
 	return m.getOrCreateLevelManager(lvlPath)
 }
 
-func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
+func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
 	m.levelToManagerGuard.RLock()
 	defer m.levelToManagerGuard.RUnlock()
 
 	return m.levelToManager[lvlPath]
 }
 
-func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
+func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
 	m.levelToManagerGuard.Lock()
 	defer m.levelToManagerGuard.Unlock()
 
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
index 0e1b2022e..5f268b0f2 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go
@@ -19,7 +19,7 @@ type cfg struct {
 	openedCacheSize int
 	blzShallowDepth uint64
 	blzShallowWidth uint64
-	compression     *compression.Config
+	compression     *compression.Compressor
 	blzOpts         []blobovnicza.Option
 	reportError     func(context.Context, string, error) // reportError is the function called when encountering disk errors.
 	metrics         Metrics
@@ -63,10 +63,15 @@ func initConfig(c *cfg) {
 	}
 }
 
-func WithLogger(l *logger.Logger) Option {
+func WithBlobovniczaTreeLogger(log *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = l
-		c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l))
+		c.log = log
+	}
+}
+
+func WithBlobovniczaLogger(log *logger.Logger) Option {
+	return func(c *cfg) {
+		c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log))
 	}
 }
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 1678e578c..37c49d741 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -9,7 +9,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
@@ -83,16 +82,14 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 			i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
 			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
-				zap.String("error", err.Error()),
-				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+				zap.Error(err))
 		}
 
 		return false, nil
 	}
 
 	if active == nil {
-		i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+		i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
 		return false, nil
 	}
 	defer active.Close(ctx)
@@ -106,8 +103,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 		} else {
 			i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
 				zap.String("path", active.SystemPath()),
-				zap.String("error", err.Error()),
-				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+				zap.Error(err))
 		}
 		if errors.Is(err, blobovnicza.ErrNoSpace) {
 			i.AllFull = true
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 16ef2b180..a840275b8 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -50,7 +50,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
 	var res common.RebuildRes
 
 	b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild)
-	completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage)
+	completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter)
 	res.ObjectsMoved += completedPreviosMoves
 	if err != nil {
 		b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err))
@@ -79,7 +79,7 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.
 	var completedDBCount uint32
 	for _, db := range dbs {
 		b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db))
-		movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter)
+		movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter)
 		res.ObjectsMoved += movedObjects
 		if err != nil {
 			b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err))
@@ -195,7 +195,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil
 	return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
 }
 
-func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) {
 	shDB := b.getBlobovnicza(ctx, path)
 	blz, err := shDB.Open(ctx)
 	if err != nil {
@@ -212,7 +212,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
 	if err != nil {
 		return 0, err
 	}
-	migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter)
+	migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter)
 	if err != nil {
 		return migratedObjects, err
 	}
@@ -226,7 +226,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
 
 func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) {
 	sysPath := filepath.Join(b.rootPath, path)
-	sysPath = sysPath + rebuildSuffix
+	sysPath += rebuildSuffix
 	_, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
 	if err != nil {
 		return nil, err
@@ -238,7 +238,7 @@ func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (fun
 	}, nil
 }
 
-func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
+func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) {
 	var result atomic.Uint64
 	batch := make(map[oid.Address][]byte)
 
@@ -253,7 +253,12 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 	})
 
 	for {
-		_, err := blz.Iterate(ctx, prm)
+		release, err := limiter.ReadRequest(ctx)
+		if err != nil {
+			return result.Load(), err
+		}
+		_, err = blz.Iterate(ctx, prm)
+		release()
 		if err != nil && !errors.Is(err, errBatchFull) {
 			return result.Load(), err
 		}
@@ -265,13 +270,19 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 		eg, egCtx := errgroup.WithContext(ctx)
 
 		for addr, data := range batch {
-			if err := limiter.AcquireWorkSlot(egCtx); err != nil {
+			release, err := limiter.AcquireWorkSlot(egCtx)
+			if err != nil {
 				_ = eg.Wait()
 				return result.Load(), err
 			}
 			eg.Go(func() error {
-				defer limiter.ReleaseWorkSlot()
-				err := b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+				defer release()
+				moveRelease, err := limiter.WriteRequest(ctx)
+				if err != nil {
+					return err
+				}
+				err = b.moveObject(egCtx, blz, blzPath, addr, data, meta)
+				moveRelease()
 				if err == nil {
 					result.Add(1)
 				}
@@ -317,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo
 	return nil
 }
 
-func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) {
+func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
 	select {
 	case <-ctx.Done():
 		return false, ctx.Err()
@@ -330,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB)
 	b.dbFilesGuard.Lock()
 	defer b.dbFilesGuard.Unlock()
 
-	if err := shDb.CloseAndRemoveFile(ctx); err != nil {
+	if err := shDB.CloseAndRemoveFile(ctx); err != nil {
 		return false, err
 	}
 	b.commondbManager.CleanResources(path)
@@ -359,7 +370,7 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
 	return b.dropDirectoryIfEmpty(filepath.Dir(path))
 }
 
-func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
+func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) {
 	var count uint64
 	var rebuildTempFilesToRemove []string
 	err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
@@ -372,13 +383,24 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 		}
 		defer shDB.Close(ctx)
 
+		release, err := rateLimiter.ReadRequest(ctx)
+		if err != nil {
+			return false, err
+		}
 		incompletedMoves, err := blz.ListMoveInfo(ctx)
+		release()
 		if err != nil {
 			return true, err
 		}
 
 		for _, move := range incompletedMoves {
-			if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil {
+			release, err := rateLimiter.WriteRequest(ctx)
+			if err != nil {
+				return false, err
+			}
+			err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore)
+			release()
+			if err != nil {
 				return true, err
 			}
 			count++
@@ -388,9 +410,14 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
 		return false, nil
 	})
 	for _, tmp := range rebuildTempFilesToRemove {
+		release, err := rateLimiter.WriteRequest(ctx)
+		if err != nil {
+			return count, err
+		}
 		if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
 			b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
 		}
+		release()
 	}
 	return count, err
 }
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index 2f58624aa..4146ef260 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -140,7 +140,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
 func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(2),
 		WithBlobovniczaShallowDepth(2),
@@ -161,16 +162,18 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
 		storageIDs: make(map[oid.Address][]byte),
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-		MetaStorage:   metaStub,
-		WorkerLimiter: &rebuildLimiterStub{},
-		FillPercent:   1,
+		MetaStorage: metaStub,
+		Limiter:     limiter,
+		FillPercent: 1,
 	})
 	require.NoError(t, err)
 	require.Equal(t, uint64(1), rRes.ObjectsMoved)
 	require.Equal(t, uint64(0), rRes.FilesRemoved)
 
 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())
 
 	blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db")))
 	require.NoError(t, blz.Open(context.Background()))
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index aae72b5ff..a7a99fec3 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -2,7 +2,9 @@ package blobovniczatree
 
 import (
 	"context"
+	"fmt"
 	"sync"
+	"sync/atomic"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -48,7 +50,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -76,10 +79,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard:      &sync.Mutex{},
 		}
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   60,
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 60,
 		})
 		require.NoError(t, err)
 		dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -94,6 +98,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 
 	t.Run("no rebuild single db", func(t *testing.T) {
@@ -102,7 +107,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -128,10 +134,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard:      &sync.Mutex{},
 		}
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   90, // 64KB / 100KB = 64%
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 90, // 64KB / 100KB = 64%
 		})
 		require.NoError(t, err)
 		dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -146,6 +153,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 
 	t.Run("rebuild by fill percent", func(t *testing.T) {
@@ -154,7 +162,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -193,10 +202,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 			storageIDs: storageIDs,
 			guard:      &sync.Mutex{},
 		}
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   80,
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 80,
 		})
 		require.NoError(t, err)
 		require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -215,6 +225,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 
 	t.Run("rebuild by overflow", func(t *testing.T) {
@@ -223,7 +234,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		dir := t.TempDir()
 		b := NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1), // single directory
 			WithBlobovniczaShallowDepth(1),
@@ -254,7 +266,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		require.NoError(t, b.Close(context.Background()))
 		b = NewBlobovniczaTree(
 			context.Background(),
-			WithLogger(test.NewLogger(t)),
+			WithBlobovniczaLogger(test.NewLogger(t)),
+			WithBlobovniczaTreeLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(64*1024),
 			WithBlobovniczaShallowWidth(1),
 			WithBlobovniczaShallowDepth(1),
@@ -266,10 +279,11 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		require.NoError(t, b.Open(mode.ComponentReadWrite))
 		require.NoError(t, b.Init())
 
+		limiter := &rebuildLimiterStub{}
 		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
-			MetaStorage:   metaStub,
-			WorkerLimiter: &rebuildLimiterStub{},
-			FillPercent:   80,
+			MetaStorage: metaStub,
+			Limiter:     limiter,
+			FillPercent: 80,
 		})
 		require.NoError(t, err)
 		require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -285,6 +299,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		}
 
 		require.NoError(t, b.Close(context.Background()))
+		require.NoError(t, limiter.ValidateReleased())
 	})
 }
 
@@ -294,7 +309,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(64*1024), // 64KB object size limit
 		WithBlobovniczaShallowWidth(5),
 		WithBlobovniczaShallowDepth(2), // depth = 2
@@ -322,7 +338,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 
 	b = NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(32*1024), // 32KB object size limit
 		WithBlobovniczaShallowWidth(5),
 		WithBlobovniczaShallowDepth(3), // depth = 3
@@ -338,9 +355,10 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 		storageIDs: storageIDs,
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	var rPrm common.RebuildPrm
 	rPrm.MetaStorage = metaStub
-	rPrm.WorkerLimiter = &rebuildLimiterStub{}
+	rPrm.Limiter = limiter
 	rPrm.FillPercent = 1
 	rRes, err := b.Rebuild(context.Background(), rPrm)
 	require.NoError(t, err)
@@ -356,13 +374,15 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
 	}
 
 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())
 }
 
 func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(sourceWidth),
 		WithBlobovniczaShallowDepth(sourceDepth),
@@ -403,7 +423,8 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 
 	b = NewBlobovniczaTree(
 		context.Background(),
-		WithLogger(test.NewLogger(t)),
+		WithBlobovniczaLogger(test.NewLogger(t)),
+		WithBlobovniczaTreeLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(targetWidth),
 		WithBlobovniczaShallowDepth(targetDepth),
@@ -427,9 +448,10 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 		storageIDs: storageIDs,
 		guard:      &sync.Mutex{},
 	}
+	limiter := &rebuildLimiterStub{}
 	var rPrm common.RebuildPrm
 	rPrm.MetaStorage = metaStub
-	rPrm.WorkerLimiter = &rebuildLimiterStub{}
+	rPrm.Limiter = limiter
 	rPrm.FillPercent = 1
 	rRes, err := b.Rebuild(context.Background(), rPrm)
 	require.NoError(t, err)
@@ -445,6 +467,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 	}
 
 	require.NoError(t, b.Close(context.Background()))
+	require.NoError(t, limiter.ValidateReleased())
 }
 
 type storageIDUpdateStub struct {
@@ -462,7 +485,36 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr
 	return nil
 }
 
-type rebuildLimiterStub struct{}
+type rebuildLimiterStub struct {
+	slots         atomic.Int64
+	readRequests  atomic.Int64
+	writeRequests atomic.Int64
+}
 
-func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil }
-func (s *rebuildLimiterStub) ReleaseWorkSlot()                      {}
+func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) {
+	s.slots.Add(1)
+	return func() { s.slots.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) {
+	s.readRequests.Add(1)
+	return func() { s.readRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) {
+	s.writeRequests.Add(1)
+	return func() { s.writeRequests.Add(-1) }, nil
+}
+
+func (s *rebuildLimiterStub) ValidateReleased() error {
+	if v := s.slots.Load(); v != 0 {
+		return fmt.Errorf("invalid slots value %d", v)
+	}
+	if v := s.readRequests.Load(); v != 0 {
+		return fmt.Errorf("invalid read requests value %d", v)
+	}
+	if v := s.writeRequests.Load(); v != 0 {
+		return fmt.Errorf("invalid write requests value %d", v)
+	}
+	return nil
+}
diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go
index f850f48b4..ceaf2538a 100644
--- a/pkg/local_object_storage/blobstor/blobstor.go
+++ b/pkg/local_object_storage/blobstor/blobstor.go
@@ -41,7 +41,7 @@ type SubStorageInfo struct {
 type Option func(*cfg)
 
 type cfg struct {
-	compression compression.Config
+	compression compression.Compressor
 	log         *logger.Logger
 	storage     []SubStorage
 	metrics     Metrics
@@ -91,50 +91,13 @@ func WithStorages(st []SubStorage) Option {
 // WithLogger returns option to specify BlobStor's logger.
 func WithLogger(l *logger.Logger) Option {
 	return func(c *cfg) {
-		c.log = l.With(zap.String("component", "BlobStor"))
+		c.log = l
 	}
 }
 
-// WithCompressObjects returns option to toggle
-// compression of the stored objects.
-//
-// If true, Zstandard algorithm is used for data compression.
-//
-// If compressor (decompressor) creation failed,
-// the uncompressed option will be used, and the error
-// is recorded in the provided log.
-func WithCompressObjects(comp bool) Option {
+func WithCompression(comp compression.Config) Option {
 	return func(c *cfg) {
-		c.compression.Enabled = comp
-	}
-}
-
-// WithCompressibilityEstimate returns an option to use
-// normilized compressibility estimate to decide compress
-// data or not.
-//
-// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
-func WithCompressibilityEstimate(v bool) Option {
-	return func(c *cfg) {
-		c.compression.UseCompressEstimation = v
-	}
-}
-
-// WithCompressibilityEstimateThreshold returns an option to set
-// normilized compressibility estimate threshold.
-//
-// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5
-func WithCompressibilityEstimateThreshold(threshold float64) Option {
-	return func(c *cfg) {
-		c.compression.CompressEstimationThreshold = threshold
-	}
-}
-
-// WithUncompressableContentTypes returns option to disable decompression
-// for specific content types as seen by object.AttributeContentType attribute.
-func WithUncompressableContentTypes(values []string) Option {
-	return func(c *cfg) {
-		c.compression.UncompressableContentTypes = values
+		c.compression.Config = comp
 	}
 }
 
@@ -152,6 +115,6 @@ func WithMetrics(m Metrics) Option {
 	}
 }
 
-func (b *BlobStor) Compressor() *compression.Config {
-	return &b.cfg.compression
+func (b *BlobStor) Compressor() *compression.Compressor {
+	return &b.compression
 }
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index 6cc56fa3b..6ddeb6f00 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -9,6 +9,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -51,7 +52,9 @@ func TestCompression(t *testing.T) {
 
 	newBlobStor := func(t *testing.T, compress bool) *BlobStor {
 		bs := New(
-			WithCompressObjects(compress),
+			WithCompression(compression.Config{
+				Enabled: compress,
+			}),
 			WithStorages(defaultStorages(dir, smallSizeLimit)))
 		require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
 		require.NoError(t, bs.Init(context.Background()))
@@ -113,8 +116,10 @@ func TestBlobstor_needsCompression(t *testing.T) {
 		dir := t.TempDir()
 
 		bs := New(
-			WithCompressObjects(compress),
-			WithUncompressableContentTypes(ct),
+			WithCompression(compression.Config{
+				Enabled:                    compress,
+				UncompressableContentTypes: ct,
+			}),
 			WithStorages([]SubStorage{
 				{
 					Storage: blobovniczatree.NewBlobovniczaTree(
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 19e181ee7..788fe66f2 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -12,16 +12,27 @@ type RebuildRes struct {
 }
 
 type RebuildPrm struct {
-	MetaStorage   MetaStorage
-	WorkerLimiter ConcurrentWorkersLimiter
-	FillPercent   int
+	MetaStorage MetaStorage
+	Limiter     RebuildLimiter
+	FillPercent int
 }
 
 type MetaStorage interface {
 	UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error
 }
 
-type ConcurrentWorkersLimiter interface {
-	AcquireWorkSlot(ctx context.Context) error
-	ReleaseWorkSlot()
+type ReleaseFunc func()
+
+type ConcurrencyLimiter interface {
+	AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error)
+}
+
+type RateLimiter interface {
+	ReadRequest(context.Context) (ReleaseFunc, error)
+	WriteRequest(context.Context) (ReleaseFunc, error)
+}
+
+type RebuildLimiter interface {
+	ConcurrencyLimiter
+	RateLimiter
 }
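// Reviewer note: illustrative sketch, not part of the patch. The new
// RebuildLimiter above splits concurrency limiting from I/O rate limiting,
// and every Acquire*/Request call now returns a ReleaseFunc, so a caller can
// never pair the wrong Release with an Acquire. A minimal conforming
// implementation (hypothetical; one shared semaphore, whereas a real one
// would likely keep separate budgets for work slots, reads and writes):
//
//	type chanLimiter struct{ slots chan struct{} }
//
//	func newChanLimiter(n int) *chanLimiter {
//		return &chanLimiter{slots: make(chan struct{}, n)}
//	}
//
//	func (l *chanLimiter) acquire(ctx context.Context) (ReleaseFunc, error) {
//		select {
//		case l.slots <- struct{}{}:
//			return func() { <-l.slots }, nil // releasing returns the slot
//		case <-ctx.Done():
//			return nil, ctx.Err()
//		}
//	}
//
//	func (l *chanLimiter) AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) { return l.acquire(ctx) }
//	func (l *chanLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error)     { return l.acquire(ctx) }
//	func (l *chanLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error)    { return l.acquire(ctx) }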
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index 6ecef48cd..e35c35e60 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -18,8 +18,8 @@ type Storage interface {
 	Path() string
 	ObjectsCount(ctx context.Context) (uint64, error)
 
-	SetCompressor(cc *compression.Config)
-	Compressor() *compression.Config
+	SetCompressor(cc *compression.Compressor)
+	Compressor() *compression.Compressor
 
 	// SetReportErrorFunc allows to provide a function to be called on disk errors.
 	// This function MUST be called before Open.
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 9f70f8ec2..445a0494b 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -11,7 +11,7 @@ import (
 )
 
 func BenchmarkCompression(b *testing.B) {
-	c := Config{Enabled: true}
+	c := Compressor{Config: Config{Enabled: true}}
 	require.NoError(b, c.Init())
 
 	for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} {
@@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) {
 	}
 }
 
-func benchWith(b *testing.B, c Config, data []byte) {
+func benchWith(b *testing.B, c Compressor, data []byte) {
 	b.ResetTimer()
 	b.ReportAllocs()
 	for range b.N {
@@ -56,8 +56,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
 	b.Run("estimate", func(b *testing.B) {
 		b.ResetTimer()
 
-		c := &Config{
-			Enabled: true,
+		c := &Compressor{
+			Config: Config{
+				Enabled: true,
+			},
 		}
 		require.NoError(b, c.Init())
 
@@ -76,8 +78,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) {
 	b.Run("compress", func(b *testing.B) {
 		b.ResetTimer()
 
-		c := &Config{
-			Enabled: true,
+		c := &Compressor{
+			Config: Config{
+				Enabled: true,
+			},
 		}
 		require.NoError(b, c.Init())
 
diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go
index 85ab47692..c76cec9a1 100644
--- a/pkg/local_object_storage/blobstor/compression/compress.go
+++ b/pkg/local_object_storage/blobstor/compression/compress.go
@@ -4,21 +4,36 @@ import (
 	"bytes"
 	"strings"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/klauspost/compress"
 	"github.com/klauspost/compress/zstd"
 )
 
+type Level string
+
+const (
+	LevelDefault      Level = ""
+	LevelOptimal      Level = "optimal"
+	LevelFastest      Level = "fastest"
+	LevelSmallestSize Level = "smallest_size"
+)
+
+type Compressor struct {
+	Config
+
+	encoder *zstd.Encoder
+	decoder *zstd.Decoder
+}
+
 // Config represents common compression-related configuration.
 type Config struct {
 	Enabled                    bool
 	UncompressableContentTypes []string
+	Level                      Level
 
-	UseCompressEstimation       bool
-	CompressEstimationThreshold float64
-
-	encoder *zstd.Encoder
-	decoder *zstd.Decoder
+	EstimateCompressibility          bool
+	EstimateCompressibilityThreshold float64
 }
 
 // zstdFrameMagic contains first 4 bytes of any compressed object
@@ -26,11 +41,11 @@ type Config struct {
 var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
 
 // Init initializes compression routines.
-func (c *Config) Init() error {
+func (c *Compressor) Init() error {
 	var err error
 
 	if c.Enabled {
-		c.encoder, err = zstd.NewWriter(nil)
+		c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel()))
 		if err != nil {
 			return err
 		}
@@ -73,7 +88,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool {
 
 // Decompress decompresses data if it starts with the magic
 // and returns data untouched otherwise.
-func (c *Config) Decompress(data []byte) ([]byte, error) {
+func (c *Compressor) Decompress(data []byte) ([]byte, error) {
 	if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) {
 		return data, nil
 	}
@@ -82,13 +97,13 @@ func (c *Config) Decompress(data []byte) ([]byte, error) {
 
 // Compress compresses data if compression is enabled
 // and returns data untouched otherwise.
-func (c *Config) Compress(data []byte) []byte {
+func (c *Compressor) Compress(data []byte) []byte {
 	if c == nil || !c.Enabled {
 		return data
 	}
-	if c.UseCompressEstimation {
+	if c.EstimateCompressibility {
 		estimated := compress.Estimate(data)
-		if estimated >= c.CompressEstimationThreshold {
+		if estimated >= c.EstimateCompressibilityThreshold {
 			return c.compress(data)
 		}
 		return data
@@ -96,7 +111,7 @@ func (c *Config) Compress(data []byte) []byte {
 	return c.compress(data)
 }
 
-func (c *Config) compress(data []byte) []byte {
+func (c *Compressor) compress(data []byte) []byte {
 	maxSize := c.encoder.MaxEncodedSize(len(data))
 	compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize))
 	if len(data) < len(compressed) {
@@ -106,7 +121,7 @@ func (c *Config) compress(data []byte) []byte {
 }
 
 // Close closes encoder and decoder, returns any error occurred.
-func (c *Config) Close() error {
+func (c *Compressor) Close() error {
 	var err error
 	if c.encoder != nil {
 		err = c.encoder.Close()
@@ -116,3 +131,24 @@
 	}
 	return err
 }
+
+func (c *Config) HasValidCompressionLevel() bool {
+	return c.Level == LevelDefault ||
+		c.Level == LevelOptimal ||
+		c.Level == LevelFastest ||
+		c.Level == LevelSmallestSize
+}
+
+func (c *Compressor) compressionLevel() zstd.EncoderLevel {
+	switch c.Level {
+	case LevelDefault, LevelOptimal:
+		return zstd.SpeedDefault
+	case LevelFastest:
+		return zstd.SpeedFastest
+	case LevelSmallestSize:
+		return zstd.SpeedBestCompression
+	default:
+		assert.Fail("unknown compression level", string(c.Level))
+		return zstd.SpeedDefault
+	}
+}
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 44685524f..0418eedd0 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"go.uber.org/zap"
 )
@@ -53,6 +54,10 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
 func (b *BlobStor) Init(ctx context.Context) error {
 	b.log.Debug(ctx, logs.BlobstorInitializing)
 
+	if !b.compression.HasValidCompressionLevel() {
+		b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level)))
+		b.compression.Level = compression.LevelDefault
+	}
 	if err := b.compression.Init(); err != nil {
 		return err
 	}
@@ -74,7 +79,7 @@ func (b *BlobStor) Close(ctx context.Context) error {
 	for i := range b.storage {
 		err := b.storage[i].Storage.Close(ctx)
 		if err != nil {
-			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
+			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
 			if firstErr == nil {
 				firstErr = err
 			}
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index 556f53e12..c155e15b8 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -7,7 +7,6 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
@@ -75,8 +74,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
 	for _, err := range errors[:len(errors)-1] {
 		b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
 			zap.Stringer("address", prm.Address),
-			zap.String("error", err.Error()),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Error(err))
 	}
 
 	return common.ExistsRes{}, errors[len(errors)-1]
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index b5dbc9e40..3caee7ee1 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -2,6 +2,8 @@ package fstree
 
 import (
 	"sync"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 )
 
 // FileCounter used to count files in FSTree. The implementation must be thread-safe.
@@ -52,16 +54,11 @@ func (c *SimpleCounter) Dec(size uint64) {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
 
-	if c.count > 0 {
-		c.count--
-	} else {
-		panic("fstree.SimpleCounter: invalid count")
-	}
-	if c.size >= size {
-		c.size -= size
-	} else {
-		panic("fstree.SimpleCounter: invalid size")
-	}
+	assert.True(c.count > 0, "fstree.SimpleCounter: invalid count")
+	c.count--
+
+	assert.True(c.size >= size, "fstree.SimpleCounter: invalid size")
+	c.size -= size
 }
 
 func (c *SimpleCounter) CountSize() (uint64, uint64) {
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 53eb0395a..112741ab4 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -45,7 +45,7 @@ type FSTree struct {
 	log *logger.Logger
 
-	*compression.Config
+	compressor *compression.Compressor
 
 	Depth      uint64
 	DirNameLen int
@@ -82,7 +82,7 @@ func New(opts ...Option) *FSTree {
 			Permissions: 0o700,
 			RootPath:    "./",
 		},
-		Config:     nil,
+		compressor: nil,
 		Depth:      4,
 		DirNameLen: DirNameLen,
 		metrics:    &noopMetrics{},
@@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 		if err != nil {
 			if prm.IgnoreErrors {
 				t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-					zap.String("err", err.Error()),
+					zap.Error(err),
 					zap.String("directory_path", dirPath))
 				return nil
 			}
@@ -196,13 +196,13 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 			}
 
 			if err == nil {
-				data, err = t.Decompress(data)
+				data, err = t.compressor.Decompress(data)
 			}
 			if err != nil {
 				if prm.IgnoreErrors {
 					t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 						zap.Stringer("address", addr),
-						zap.String("err", err.Error()),
+						zap.Error(err),
 						zap.String("path", path))
 					continue
 				}
@@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err
 		return common.PutRes{}, err
 	}
 	if !prm.DontCompress {
-		prm.RawData = t.Compress(prm.RawData)
+		prm.RawData = t.compressor.Compress(prm.RawData)
 	}
 
 	size = len(prm.RawData)
@@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err
 		}
 	}
 
-	data, err = t.Decompress(data)
+	data, err = t.compressor.Decompress(data)
 	if err != nil {
 		return common.GetRes{}, err
 	}
@@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
 		},
 	)
 	if err != nil {
-		return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+		return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
 	}
 
 	return count, size, nil
@@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
 		},
 	)
 	if err != nil {
-		return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+		return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
 	}
 	success = true
 	return result, nil
@@ -597,12 +597,12 @@ func (t *FSTree) Path() string {
 }
 
 // SetCompressor implements common.Storage.
-func (t *FSTree) SetCompressor(cc *compression.Config) {
-	t.Config = cc
+func (t *FSTree) SetCompressor(cc *compression.Compressor) {
+	t.compressor = cc
 }
 
-func (t *FSTree) Compressor() *compression.Config {
-	return t.Config
+func (t *FSTree) Compressor() *compression.Compressor {
+	return t.compressor
 }
 
 // SetReportErrorFunc implements common.Storage.
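// Reviewer note: illustrative sketch, not part of the patch. FSTree no longer
// embeds the compression config; a Compressor is injected explicitly via
// SetCompressor before the tree is opened (hypothetical newTree helper):
//
//	func newTree(dir string, c *compression.Compressor) (*fstree.FSTree, error) {
//		fst := fstree.New(fstree.WithPath(dir), fstree.WithDepth(1))
//		fst.SetCompressor(c) // c may be nil-initialized elsewhere; see round-trip sketch above
//		if err := fst.Open(mode.ComponentReadWrite); err != nil {
//			return nil, err
//		}
//		return fst, fst.Init()
//	}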
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 4110ba7d7..6d633dad6 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -67,12 +67,9 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
 	err := w.writeFile(tmpPath, data)
 	if err != nil {
 		var pe *fs.PathError
-		if errors.As(err, &pe) {
-			switch pe.Err {
-			case syscall.ENOSPC:
-				err = common.ErrNoSpace
-				_ = os.RemoveAll(tmpPath)
-			}
+		if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) {
+			err = common.ErrNoSpace
+			_ = os.RemoveAll(tmpPath)
 		}
 		return err
 	}
@@ -136,6 +133,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error {
 	if err := os.Remove(p); err != nil {
 		return err
 	}
-	w.fileCounter.Dec(uint64(size))
+	w.fileCounter.Dec(size)
 	return nil
 }
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index 3561c616b..49cbda344 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -69,10 +69,13 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
 	if err != nil {
 		return err
 	}
+	written := 0
 	tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10)
 	n, err := unix.Write(fd, data)
-	if err == nil {
-		if n == len(data) {
+	for err == nil {
+		written += n
+
+		if written == len(data) {
 			err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
 			if err == nil {
 				w.fileCounter.Inc(uint64(len(data)))
@@ -80,9 +83,23 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
 			if errors.Is(err, unix.EEXIST) {
 				err = nil
 			}
-		} else {
-			err = errors.New("incomplete write")
+			break
 		}
+
+		// From man 2 write:
+		// https://www.man7.org/linux/man-pages/man2/write.2.html
+		//
+		// Note that a successful write() may transfer fewer than count
+		// bytes. Such partial writes can occur for various reasons; for
+		// example, because there was insufficient space on the disk device
+		// to write all of the requested bytes, or because a blocked write()
+		// to a socket, pipe, or similar was interrupted by a signal handler
+		// after it had transferred some, but before it had transferred all
+		// of the requested bytes. In the event of a partial write, the
+		// caller can make another write() call to transfer the remaining
+		// bytes. The subsequent call will either transfer further bytes or
+		// may result in an error (e.g., if the disk is now full).
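+		//
+		// Instead of failing with the old "incomplete write" error, retry
+		// the write with the remaining bytes until the whole buffer is
+		// flushed or write() reports a real error.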
+		n, err = unix.Write(fd, data[written:])
 	}
 	errClose := unix.Close(fd)
 	if err != nil {
@@ -114,7 +131,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error {
 		return logicerr.Wrap(new(apistatus.ObjectNotFound))
 	}
 	if err == nil {
-		w.fileCounter.Dec(uint64(size))
+		w.fileCounter.Dec(size)
 	}
 	return err
 }
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
new file mode 100644
index 000000000..7fae2e695
--- /dev/null
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go
@@ -0,0 +1,42 @@
+//go:build linux && integration
+
+package fstree
+
+import (
+	"context"
+	"errors"
+	"os"
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
+)
+
+func TestENOSPC(t *testing.T) {
+	dir, err := os.MkdirTemp(t.TempDir(), "ramdisk")
+	require.NoError(t, err)
+
+	f, err := os.CreateTemp(t.TempDir(), "ramdisk_*")
+	require.NoError(t, err)
+
+	err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M")
+	if errors.Is(err, unix.EPERM) {
+		t.Skipf("skip size tests: no permission to mount: %v", err)
+		return
+	}
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, unix.Unmount(dir, 0))
+	}()
+
+	fst := New(WithPath(dir), WithDepth(1))
+	require.NoError(t, fst.Open(mode.ComponentReadWrite))
+	require.NoError(t, fst.Init())
+
+	_, err = fst.Put(context.Background(), common.PutPrm{
+		RawData: make([]byte, 10<<20),
+	})
+	require.ErrorIs(t, err, common.ErrNoSpace)
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go
index 7155ddcbb..6f2ac87e1 100644
--- a/pkg/local_object_storage/blobstor/fstree/option.go
+++ b/pkg/local_object_storage/blobstor/fstree/option.go
@@ -4,7 +4,6 @@ import (
 	"io/fs"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-	"go.uber.org/zap"
 )
 
 type Option func(*FSTree)
@@ -53,6 +52,6 @@ func WithFileCounter(c FileCounter) Option {
 
 func WithLogger(l *logger.Logger) Option {
 	return func(f *FSTree) {
-		f.log = l.With(zap.String("component", "FSTree"))
+		f.log = l
 	}
 }
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index 36b2c33f8..d54c54f59 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -3,6 +3,7 @@ package blobstortest
 import (
 	"context"
 	"errors"
+	"slices"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -26,7 +27,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
 	_, err := s.Delete(context.Background(), delPrm)
 	require.NoError(t, err)
 
-	objects = append(objects[:delID], objects[delID+1:]...)
+ objects = slices.Delete(objects, delID, delID+1) runTestNormalHandler(t, s, objects) @@ -49,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) _, err := s.Iterate(context.Background(), iterPrm) require.NoError(t, err) - require.Equal(t, len(objects), len(seen)) + require.Len(t, objects, len(seen)) for i := range objects { d, ok := seen[objects[i].addr.String()] require.True(t, ok) diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go index 1ba835a95..ff1aa9d64 100644 --- a/pkg/local_object_storage/blobstor/iterate.go +++ b/pkg/local_object_storage/blobstor/iterate.go @@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.String("storage_path", b.storage[i].Storage.Path()), zap.String("storage_type", b.storage[i].Storage.Type()), - zap.String("err", err.Error())) + zap.Error(err)) continue } return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index ccfa510fe..2786321a8 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -8,6 +8,7 @@ import ( "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -24,7 +25,9 @@ func TestIterateObjects(t *testing.T) { // create BlobStor instance blobStor := New( WithStorages(defaultStorages(p, smalSz)), - WithCompressObjects(true), + WithCompression(compression.Config{ + Enabled: true, + }), ) defer os.RemoveAll(p) diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 95a916662..3df96a1c3 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -16,7 +16,7 @@ func (s *memstoreImpl) Init() error func (s *memstoreImpl) Close(context.Context) error { return nil } func (s *memstoreImpl) Type() string { return Type } func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } +func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression } func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go index 0252c7983..7ef7e37a4 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore.go @@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes, // Decompress the data. 
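The iteration test above drops the append(objects[:delID], objects[delID+1:]...) idiom for slices.Delete from the standard library, which reads as what it does; since Go 1.22 it also zeroes the vacated tail slot, so removed pointers do not linger. A quick sketch of the equivalence:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	objects := []string{"obj-0", "obj-1", "obj-2", "obj-3"}
	delID := 1

	// Equivalent to append(objects[:delID], objects[delID+1:]...),
	// but states the intent and zeroes the vacated tail element.
	objects = slices.Delete(objects, delID, delID+1)

	fmt.Println(objects) // [obj-0 obj-2 obj-3]
}
```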
var err error if data, err = s.compression.Decompress(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRes{}, fmt.Errorf("decompress object data: %w", err) } // Unmarshal the SDK object. obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err) } return common.GetRes{Object: obj, RawData: data}, nil @@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common elem := common.IterationElement{ ObjectData: v, } - if err := elem.Address.DecodeString(string(k)); err != nil { + if err := elem.Address.DecodeString(k); err != nil { if req.IgnoreErrors { continue } - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err)) + return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err)) } var err error if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil { diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go index 97a03993d..7605af4e5 100644 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ b/pkg/local_object_storage/blobstor/memstore/option.go @@ -7,7 +7,7 @@ import ( type cfg struct { rootPath string readOnly bool - compression *compression.Config + compression *compression.Compressor } func defaultConfig() *cfg { diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go index af19e398e..80268fa7a 100644 --- a/pkg/local_object_storage/blobstor/mode.go +++ b/pkg/local_object_storage/blobstor/mode.go @@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { } } if err != nil { - return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) + return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) } b.mode = m diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go index 342da28bf..fe9c109dd 100644 --- a/pkg/local_object_storage/blobstor/put.go +++ b/pkg/local_object_storage/blobstor/put.go @@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e // marshal object data, err := prm.Object.Marshal() if err != nil { - return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err) + return common.PutRes{}, fmt.Errorf("marshal the object: %w", err) } prm.RawData = data } diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go index 2a6b94789..f28816555 100644 --- a/pkg/local_object_storage/blobstor/rebuild.go +++ b/pkg/local_object_storage/blobstor/rebuild.go @@ -13,19 +13,14 @@ type StorageIDUpdate interface { UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error } -type ConcurrentWorkersLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error { +func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error { var summary common.RebuildRes var rErr error for _, storage := range b.storage { res, err := storage.Storage.Rebuild(ctx, 
common.RebuildPrm{ - MetaStorage: upd, - WorkerLimiter: limiter, - FillPercent: fillPercent, + MetaStorage: upd, + Limiter: concLimiter, + FillPercent: fillPercent, }) summary.FilesRemoved += res.FilesRemoved summary.ObjectsMoved += res.ObjectsMoved diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index fb1188751..3a38ecf82 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -17,8 +17,8 @@ type cfg struct { Type func() string Path func() string - SetCompressor func(cc *compression.Config) - Compressor func() *compression.Config + SetCompressor func(cc *compression.Compressor) + Compressor func() *compression.Compressor SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) @@ -45,11 +45,11 @@ func WithClose(f func() error) Option { return func(c *cfg) { c func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } } func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } } -func WithSetCompressor(f func(*compression.Config)) Option { +func WithSetCompressor(f func(*compression.Compressor)) Option { return func(c *cfg) { c.overrides.SetCompressor = f } } -func WithCompressor(f func() *compression.Config) Option { +func WithCompressor(f func() *compression.Compressor) Option { return func(c *cfg) { c.overrides.Compressor = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index 626ba0023..190b6a876 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -116,7 +116,7 @@ func (s *TestStore) Path() string { } } -func (s *TestStore) SetCompressor(cc *compression.Config) { +func (s *TestStore) SetCompressor(cc *compression.Compressor) { s.mu.RLock() defer s.mu.RUnlock() switch { @@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Config) { } } -func (s *TestStore) Compressor() *compression.Config { +func (s *TestStore) Compressor() *compression.Compressor { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 24059a3f9..e0617a832 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -48,8 +48,9 @@ func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.containerSize(ctx, prm) - return err + var csErr error + res, csErr = e.containerSize(ctx, prm) + return csErr }) return @@ -69,12 +70,13 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er return res.Size(), nil } -func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { + var res ContainerSizeRes + err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) - csRes, err := sh.Shard.ContainerSize(csPrm) + csRes, err := sh.ContainerSize(ctx, csPrm) if err != nil { 
e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) @@ -86,7 +88,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) return false }) - return + return res, err } // ListContainers returns a unique container IDs presented in the engine objects. @@ -96,8 +98,9 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) defer elapsed("ListContainers", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.listContainers(ctx) - return err + var lcErr error + res, lcErr = e.listContainers(ctx) + return lcErr }) return @@ -118,8 +121,8 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { uniqueIDs := make(map[string]cid.ID) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { e.reportShardError(ctx, sh, "can't get list of containers", err) return false @@ -133,7 +136,9 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, } return false - }) + }); err != nil { + return ListContainersRes{}, err + } result := make([]cid.ID, 0, len(uniqueIDs)) for _, v := range uniqueIDs { diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index a5c53dcad..bf1649f6e 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -22,10 +22,6 @@ type shardInitError struct { // Open opens all StorageEngine's components. func (e *StorageEngine) Open(ctx context.Context) error { - return e.open(ctx) -} - -func (e *StorageEngine) open(ctx context.Context) error { e.mtx.Lock() defer e.mtx.Unlock() @@ -77,7 +73,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { errCh := make(chan shardInitError, len(e.shards)) var eg errgroup.Group - if e.cfg.lowMem && e.anyShardRequiresRefill() { + if e.lowMem && e.anyShardRequiresRefill() { eg.SetLimit(1) } @@ -95,7 +91,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { err := eg.Wait() close(errCh) if err != nil { - return fmt.Errorf("failed to initialize shards: %w", err) + return fmt.Errorf("initialize shards: %w", err) } for res := range errCh { @@ -117,7 +113,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { continue } - return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err) + return fmt.Errorf("initialize shard %s: %w", res.id, res.err) } } @@ -149,25 +145,19 @@ var errClosed = errors.New("storage engine is closed") func (e *StorageEngine) Close(ctx context.Context) error { close(e.closeCh) defer e.wg.Wait() - return e.setBlockExecErr(ctx, errClosed) + return e.closeEngine(ctx) } // closes all shards. Never returns an error, shard errors are logged. 
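In the Init hunk above, shard initialization runs through an errgroup, and when the engine is in low-memory mode with at least one shard pending a refill, eg.SetLimit(1) serializes the goroutines so only one memory-hungry refill runs at a time. A reduced sketch of that gating, with initShard as a stand-in for the real per-shard work:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func initShard(ctx context.Context, id int) error {
	fmt.Println("init shard", id) // stand-in for open/refill work
	return nil
}

func initAll(ctx context.Context, shards []int, lowMem bool) error {
	var eg errgroup.Group
	if lowMem {
		eg.SetLimit(1) // serialize memory-hungry refills
	}
	// Per-iteration loop variables (Go 1.22+) make this capture safe.
	for _, id := range shards {
		eg.Go(func() error { return initShard(ctx, id) })
	}
	return eg.Wait()
}

func main() {
	fmt.Println(initAll(context.Background(), []int{0, 1, 2}, true))
}
```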
-func (e *StorageEngine) close(ctx context.Context, releasePools bool) error { +func (e *StorageEngine) closeAllShards(ctx context.Context) error { e.mtx.RLock() defer e.mtx.RUnlock() - if releasePools { - for _, p := range e.shardPools { - p.Release() - } - } - for id, sh := range e.shards { if err := sh.Close(ctx); err != nil { e.log.Debug(ctx, logs.EngineCouldNotCloseShard, zap.String("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -182,70 +172,23 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error { e.blockExec.mtx.RLock() defer e.blockExec.mtx.RUnlock() - if e.blockExec.err != nil { - return e.blockExec.err + if e.blockExec.closed { + return errClosed } return op() } -// sets the flag of blocking execution of all data operations according to err: -// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method -// (if err == errClosed => additionally releases pools and does not allow to resume executions). -// - otherwise, resumes execution. If exec was blocked, calls open method. -// -// Can be called concurrently with exec. In this case it waits for all executions to complete. -func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { +func (e *StorageEngine) closeEngine(ctx context.Context) error { e.blockExec.mtx.Lock() defer e.blockExec.mtx.Unlock() - prevErr := e.blockExec.err - - wasClosed := errors.Is(prevErr, errClosed) - if wasClosed { + if e.blockExec.closed { return errClosed } - e.blockExec.err = err - - if err == nil { - if prevErr != nil { // block -> ok - return e.open(ctx) - } - } else if prevErr == nil { // ok -> block - return e.close(ctx, errors.Is(err, errClosed)) - } - - // otherwise do nothing - - return nil -} - -// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err. -// To resume the execution, use ResumeExecution method. -// -// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources -// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions -// to complete). Returns error if any Close has been called before. -// -// Must not be called concurrently with either Open or Init. -// -// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution -// for this. -func (e *StorageEngine) BlockExecution(err error) error { - return e.setBlockExecErr(context.Background(), err) -} - -// ResumeExecution resumes the execution of any data-related operation. -// To block the execution, use BlockExecution method. -// -// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources -// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions -// to complete). Returns error if any Close has been called before. -// -// Must not be called concurrently with either Open or Init. 
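With setBlockExecErr and its error-valued state removed above, the only state left to guard is a one-way closed flag: data operations take the read lock and proceed concurrently, while Close takes the write lock, flips the flag once, and everything afterwards fails fast with errClosed. The locking protocol, reduced:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("storage engine is closed")

type engine struct {
	mtx    sync.RWMutex
	closed bool
}

// execIfNotBlocked runs op under the read lock, so any number of
// operations proceed concurrently, but never concurrently with close.
func (e *engine) execIfNotBlocked(op func() error) error {
	e.mtx.RLock()
	defer e.mtx.RUnlock()
	if e.closed {
		return errClosed
	}
	return op()
}

// close waits for in-flight operations (write lock) and flips the flag.
func (e *engine) close() error {
	e.mtx.Lock()
	defer e.mtx.Unlock()
	if e.closed {
		return errClosed
	}
	e.closed = true
	return nil
}

func main() {
	e := &engine{}
	fmt.Println(e.execIfNotBlocked(func() error { return nil })) // <nil>
	fmt.Println(e.close())                                       // <nil>
	fmt.Println(e.execIfNotBlocked(func() error { return nil })) // storage engine is closed
}
```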
-func (e *StorageEngine) ResumeExecution() error { - return e.setBlockExecErr(context.Background(), nil) + e.blockExec.closed = true + return e.closeAllShards(ctx) } type ReConfiguration struct { @@ -320,7 +263,7 @@ loop: for _, newID := range shardsToAdd { sh, err := e.createShard(ctx, rcfg.shards[newID]) if err != nil { - return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err) + return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err) } idStr := sh.ID().String() @@ -331,13 +274,13 @@ loop: } if err != nil { _ = sh.Close(ctx) - return fmt.Errorf("could not init %s shard: %w", idStr, err) + return fmt.Errorf("init %s shard: %w", idStr, err) } err = e.addShard(sh) if err != nil { _ = sh.Close(ctx) - return fmt.Errorf("could not add %s shard: %w", idStr, err) + return fmt.Errorf("add %s shard: %w", idStr, err) } e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr)) diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index c9efc312c..4ff0ed5ec 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -2,7 +2,6 @@ package engine import ( "context" - "errors" "fmt" "io/fs" "os" @@ -12,17 +11,14 @@ import ( "testing" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) @@ -163,42 +159,6 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O require.Equal(t, 1, shardCount) } -func TestExecBlocks(t *testing.T) { - e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many - - // put some object - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - - addr := object.AddressOf(obj) - - require.NoError(t, Put(context.Background(), e, obj, false)) - - // block executions - errBlock := errors.New("block exec err") - - require.NoError(t, e.BlockExecution(errBlock)) - - // try to exec some op - _, err := Head(context.Background(), e, addr) - require.ErrorIs(t, err, errBlock) - - // resume executions - require.NoError(t, e.ResumeExecution()) - - _, err = Head(context.Background(), e, addr) // can be any data-related op - require.NoError(t, err) - - // close - require.NoError(t, e.Close(context.Background())) - - // try exec after close - _, err = Head(context.Background(), e, addr) - require.Error(t, err) - - // try to resume - require.Error(t, e.ResumeExecution()) -} - func TestPersistentShardID(t *testing.T) { dir := t.TempDir() @@ -245,7 +205,6 @@ func TestReload(t *testing.T) { // no new paths => no new shards require.Equal(t, shardNum, len(e.shards)) - require.Equal(t, 
shardNum, len(e.shardPools)) newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum)) @@ -257,7 +216,6 @@ func TestReload(t *testing.T) { require.NoError(t, e.Reload(context.Background(), rcfg)) require.Equal(t, shardNum+1, len(e.shards)) - require.Equal(t, shardNum+1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -277,7 +235,6 @@ func TestReload(t *testing.T) { // removed one require.Equal(t, shardNum-1, len(e.shards)) - require.Equal(t, shardNum-1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -311,7 +268,6 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str } require.Equal(t, num, len(e.shards)) - require.Equal(t, num, len(e.shardPools)) return e, currShards } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 7164ff21f..223cdbc48 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -24,9 +23,6 @@ type DeletePrm struct { forceRemoval bool } -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct{} - // WithAddress is a Delete option to set the addresses of the objects to delete. // // Option is required. @@ -51,7 +47,7 @@ func (p *DeletePrm) WithForceRemoval() { // NOTE: Marks any object to be deleted (despite any prohibitions // on operations with that object) if WithForceRemoval option has // been provided. -func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) { +func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete", trace.WithAttributes( attribute.String("address", prm.addr.EncodeToString()), @@ -60,15 +56,12 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe defer span.End() defer elapsed("Delete", e.metrics.AddMethodDuration)() - err = e.execIfNotBlocked(func() error { - res, err = e.delete(ctx, prm) - return err + return e.execIfNotBlocked(func() error { + return e.delete(ctx, prm) }) - - return } -func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { +func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { var locked struct { is bool } @@ -78,7 +71,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e // Removal of a big object is done in multiple stages: // 1. Remove the parent object. If it is locked or already removed, return immediately. // 2. Otherwise, search for all objects with a particular SplitID and delete them too. - e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { var existsPrm shard.ExistsPrm existsPrm.Address = prm.addr @@ -123,20 +116,22 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e // If a parent object is removed we should set GC mark on each shard. 
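The delete path above shows the new iteration contract: iterateOverSortedShards now returns an error that callers must propagate instead of being fire-and-forget. Where that error originates is not visible in this diff; the sketch below assumes a context check between shards, which is one natural source:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// iterate walks shards until the handler asks to stop. (That the real
// helper checks the context between shards is an assumption; the diff
// only shows callers handling its error.)
func iterate(ctx context.Context, shards []string, handler func(string) (stop bool)) error {
	for _, sh := range shards {
		if err := ctx.Err(); err != nil {
			return err // canceled or deadline exceeded mid-walk
		}
		if handler(sh) {
			return nil
		}
	}
	return nil
}

func deleteObject(ctx context.Context, shards []string, addr string) error {
	var found bool
	if err := iterate(ctx, shards, func(sh string) bool {
		found = sh == "shard-2" // stand-in for the exists check
		return found
	}); err != nil {
		return err
	}
	if !found {
		return errors.New("object not found: " + addr)
	}
	return nil
}

func main() {
	fmt.Println(deleteObject(context.Background(), []string{"shard-1", "shard-2"}, "addr-1"))
}
```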
return splitInfo == nil - }) + }); err != nil { + return err + } if locked.is { - return DeleteRes{}, new(apistatus.ObjectLocked) + return new(apistatus.ObjectLocked) } if splitInfo != nil { - e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) + return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } - return DeleteRes{}, nil + return nil } -func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { +func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error { var fs objectSDK.SearchFilters fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID) @@ -149,13 +144,12 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo inhumePrm.ForceRemoval() } - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(ctx, selectPrm) if err != nil { e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return false } @@ -166,8 +160,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.String("err", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } } @@ -196,8 +189,7 @@ func (e *StorageEngine) deleteChunks( if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.String("err", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } } diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index 0dd2e94bb..a56598c09 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -70,8 +70,7 @@ func TestDeleteBigObject(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - _, err := e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -141,8 +140,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - _, err := e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -153,7 +151,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { // delete physical var delPrm shard.DeletePrm delPrm.SetAddresses(addrParent) - _, err = s1.Delete(context.Background(), delPrm) + _, err := s1.Delete(context.Background(), delPrm) require.NoError(t, err) delPrm.SetAddresses(addrLink) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 029904046..376d545d3 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -12,8 +12,8 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ -28,16 +28,13 @@ type StorageEngine struct { shards map[string]hashedShard - shardPools map[string]util.WorkerPool - closeCh chan struct{} setModeCh chan setModeRequest wg sync.WaitGroup blockExec struct { - mtx sync.RWMutex - - err error + mtx sync.RWMutex + closed bool } evacuateLimiter *evacuationLimiter } @@ -140,7 +137,7 @@ func (e *StorageEngine) reportShardError( if isLogical(err) { e.log.Warn(ctx, msg, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -151,7 +148,7 @@ func (e *StorageEngine) reportShardError( e.log.Warn(ctx, msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), - zap.String("error", err.Error()), + zap.Error(err), }, fields...)...) if e.errorsThreshold == 0 || errCount < e.errorsThreshold { @@ -176,7 +173,10 @@ func (e *StorageEngine) reportShardError( } func isLogical(err error) bool { - return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) + return errors.As(err, &logicerr.Logical{}) || + errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) || + errors.As(err, new(*apistatus.ResourceExhausted)) } // Option represents StorageEngine's constructor option. @@ -189,8 +189,6 @@ type cfg struct { metrics MetricRegister - shardPoolSize uint32 - lowMem bool containerSource atomic.Pointer[containerSource] @@ -198,9 +196,8 @@ type cfg struct { func defaultCfg() *cfg { res := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - shardPoolSize: 20, - metrics: noopMetrics{}, + log: logger.NewLoggerWrapper(zap.L()), + metrics: noopMetrics{}, } res.containerSource.Store(&containerSource{}) return res @@ -214,13 +211,18 @@ func New(opts ...Option) *StorageEngine { opts[i](c) } + evLimMtx := &sync.RWMutex{} + evLimCond := sync.NewCond(evLimMtx) + return &StorageEngine{ - cfg: c, - shards: make(map[string]hashedShard), - shardPools: make(map[string]util.WorkerPool), - closeCh: make(chan struct{}), - setModeCh: make(chan setModeRequest), - evacuateLimiter: &evacuationLimiter{}, + cfg: c, + shards: make(map[string]hashedShard), + closeCh: make(chan struct{}), + setModeCh: make(chan setModeRequest), + evacuateLimiter: &evacuationLimiter{ + guard: evLimMtx, + statusCond: evLimCond, + }, } } @@ -237,13 +239,6 @@ func WithMetrics(v MetricRegister) Option { } } -// WithShardPoolSize returns option to specify size of worker pool for each shard. -func WithShardPoolSize(sz uint32) Option { - return func(c *cfg) { - c.shardPoolSize = sz - } -} - // WithErrorThreshold returns an option to specify size amount of errors after which // shard is moved to read-only mode. 
func WithErrorThreshold(sz uint32) Option { @@ -279,7 +274,7 @@ func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) ( return true, nil } - wasRemoved, err := container.WasRemoved(s.cs, id) + wasRemoved, err := container.WasRemoved(ctx, s.cs, id) if err != nil { return false, err } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index a7cb90bae..fc6d9ee9c 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -2,9 +2,14 @@ package engine import ( "context" + "fmt" "path/filepath" + "runtime/debug" + "strings" + "sync" "testing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -17,10 +22,12 @@ import ( "github.com/stretchr/testify/require" ) -type epochState struct{} +type epochState struct { + currEpoch uint64 +} func (s epochState) CurrentEpoch() uint64 { - return 0 + return s.currEpoch } type testEngineWrapper struct { @@ -53,7 +60,6 @@ func (te *testEngineWrapper) setShardsNumOpts( te.shardIDs[i] = shard.ID() } require.Len(t, te.engine.shards, num) - require.Len(t, te.engine.shardPools, num) return te } @@ -87,12 +93,17 @@ func testGetDefaultShardOptions(t testing.TB) []shard.Option { blobstor.WithLogger(test.NewLogger(t)), ), shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - meta.WithLogger(test.NewLogger(t)), - ), + shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...), + shard.WithLimiter(&testQoSLimiter{t: t}), + } +} + +func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option { + return []meta.Option{ + meta.WithPath(filepath.Join(t.TempDir(), "metabase")), + meta.WithPermissions(0o700), + meta.WithEpochState(epochState{}), + meta.WithLogger(test.NewLogger(t)), } } @@ -105,7 +116,8 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1), blobovniczatree.WithPermissions(0o700), - blobovniczatree.WithLogger(test.NewLogger(t))), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))), Policy: func(_ *objectSDK.Object, data []byte) bool { return uint64(len(data)) < smallSize }, @@ -145,3 +157,78 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes }, }, smallFileStorage, largeFileStorage } + +var _ qos.Limiter = (*testQoSLimiter)(nil) + +type testQoSLimiter struct { + t testing.TB + quard sync.Mutex + id int64 + readStacks map[int64][]byte + writeStacks map[int64][]byte +} + +func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} + +func (t *testQoSLimiter) Close() { + t.quard.Lock() + defer t.quard.Unlock() + + var sb strings.Builder + var seqN int + for _, stack := range t.readStacks { + seqN++ + sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack))) + } + for _, stack := range t.writeStacks { + seqN++ + sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack))) 
+ } + require.True(t.t, seqN == 0, sb.String()) +} + +func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { + t.quard.Lock() + defer t.quard.Unlock() + + stack := debug.Stack() + + t.id++ + id := t.id + + if t.readStacks == nil { + t.readStacks = make(map[int64][]byte) + } + t.readStacks[id] = stack + + return func() { + t.quard.Lock() + defer t.quard.Unlock() + + delete(t.readStacks, id) + }, nil +} + +func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { + t.quard.Lock() + defer t.quard.Unlock() + + stack := debug.Stack() + + t.id++ + id := t.id + + if t.writeStacks == nil { + t.writeStacks = make(map[int64][]byte) + } + t.writeStacks[id] = stack + + return func() { + t.quard.Lock() + defer t.quard.Unlock() + + delete(t.writeStacks, id) + }, nil +} + +func (t *testQoSLimiter) SetParentID(string) {} diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index d68a7e826..57029dd5f 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -46,7 +46,6 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) var testShards [2]*testShard te := testNewEngine(t, - WithShardPoolSize(1), WithErrorThreshold(errThreshold), ). setShardsNumOpts(t, 2, func(id int) []shard.Option { diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index b88c249b1..c08dfbf03 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strings" "sync" "sync/atomic" @@ -14,8 +15,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -86,7 +85,6 @@ type EvacuateShardPrm struct { ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error) TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error) IgnoreErrors bool - Async bool Scope EvacuateScope RepOneOnly bool @@ -202,19 +200,14 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes { return res } -type pooledShard struct { - hashedShard - pool util.WorkerPool -} - var errMustHaveTwoShards = errors.New("must have at least 1 spare shard") // Evacuate moves data from one shard to the others. // The shard being moved must be in read-only mode. 
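The hunk below makes Evacuate asynchronous-only: the Async flag and the (*EvacuateShardRes, error) return go away, the operation detaches from the caller through context.WithoutCancel, runs inside the limiter's errgroup, and callers observe progress via GetEvacuationState. A sketch of the start-and-detach shape, with limiter and run as simplified stand-ins:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

type limiter struct{ eg *errgroup.Group }

// start registers the background run and returns immediately; the
// caller's cancellation no longer reaches the detached work.
func (l *limiter) start(ctx context.Context, run func(context.Context) error) error {
	ctx = context.WithoutCancel(ctx) // keep values, drop cancellation
	eg, egCtx := errgroup.WithContext(ctx)
	l.eg = eg
	eg.Go(func() error { return run(egCtx) })
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	l := &limiter{}
	_ = l.start(ctx, func(context.Context) error {
		time.Sleep(10 * time.Millisecond) // stand-in for moving objects
		fmt.Println("evacuation finished")
		return nil
	})
	cancel()        // returns immediately; does not stop the detached work
	_ = l.eg.Wait() // demo only: real callers poll the evacuation state
}
```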
-func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) { +func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err() default: } @@ -226,7 +219,6 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate", trace.WithAttributes( attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), )) @@ -234,7 +226,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev shards, err := e.getActualShards(shardIDs, prm) if err != nil { - return nil, err + return err } shardsToEvacuate := make(map[string]*shard.Shard) @@ -247,46 +239,33 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev } res := NewEvacuateShardRes() - ctx = ctxOrBackground(ctx, prm.Async) - eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) + ctx = context.WithoutCancel(ctx) + eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) if err != nil { - return nil, err + return err } var mtx sync.RWMutex - copyShards := func() []pooledShard { + copyShards := func() []hashedShard { mtx.RLock() defer mtx.RUnlock() - t := make([]pooledShard, len(shards)) - copy(t, shards) + t := slices.Clone(shards) return t } eg.Go(func() error { - return e.evacuateShards(egCtx, shardIDs, prm, res, copyShards, shardsToEvacuate) + return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate) }) - if prm.Async { - return nil, nil - } - - return res, eg.Wait() -} - -func ctxOrBackground(ctx context.Context, background bool) context.Context { - if background { - return context.Background() - } - return ctx + return nil } func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { var err error ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", trace.WithAttributes( attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), attribute.Bool("repOneOnly", prm.RepOneOnly), @@ -298,12 +277,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p }() e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + zap.Stringer("scope", prm.Scope)) err = e.getTotals(ctx, prm, shardsToEvacuate, res) if err != nil { e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + zap.Stringer("scope", prm.Scope)) return err } @@ -337,7 +316,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p } if err != nil { e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), 
zap.Stringer("scope", prm.Scope)) + zap.Stringer("scope", prm.Scope)) return err } @@ -403,7 +382,7 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha } func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, egContainer *errgroup.Group, egObject *errgroup.Group, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard", @@ -427,7 +406,7 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel } func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, egContainer *errgroup.Group, egObject *errgroup.Group, ) error { sh := shardsToEvacuate[shardID] @@ -440,7 +419,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context } egContainer.Go(func() error { var skip bool - c, err := e.containerSource.Load().cs.Get(cnt) + c, err := e.containerSource.Load().cs.Get(ctx, cnt) if err != nil { if client.IsErrContainerNotFound(err) { skip = true @@ -494,14 +473,13 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context err := sh.IterateOverContainers(ctx, cntPrm) if err != nil { cancel(err) - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField) } return err } func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { sh := shardsToEvacuate[shardID] shards := getShards() @@ -531,7 +509,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, } func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees", trace.WithAttributes( @@ -554,7 +532,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID), - evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) res.trEvacuated.Add(1) continue } @@ -564,26 +542,26 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", 
contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } if moved { e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote, zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID), zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK), - evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) res.trEvacuated.Add(1) } else if prm.IgnoreErrors { res.trFailed.Add(1) e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } else { e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID) } } @@ -592,14 +570,14 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) { if prm.TreeHandler == nil { - return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) + return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) } return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh) } func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) (bool, string, error) { target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate) if err != nil { @@ -669,15 +647,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar // findShardToEvacuateTree returns first shard according HRW or first shard with tree exists. 
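Target selection here rests on HRW (rendezvous) sorting: hashing each shard together with the key yields a stable, coordination-free preference order, so every node independently picks the same destination for a given tree or address. The hunks use the hrw library; the idea itself, in a self-contained form that is not that library's API:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// weight scores a (shard, key) pair; higher scores are preferred.
func weight(shard, key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(shard + "/" + key))
	return h.Sum64()
}

// sortByPreference orders shards by descending weight for this key,
// conceptually mirroring hrw.SortHasherSliceByValue above.
func sortByPreference(shards []string, key string) {
	sort.Slice(shards, func(i, j int) bool {
		return weight(shards[i], key) > weight(shards[j], key)
	})
}

func main() {
	shards := []string{"shard-1", "shard-2", "shard-3"}
	sortByPreference(shards, "tree:container-xyz")
	fmt.Println(shards) // identical order on every node for this key
}
```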
func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, -) (pooledShard, bool, error) { + shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, +) (hashedShard, bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString())) - var result pooledShard + var result hashedShard var found bool for _, target := range shards { select { case <-ctx.Done(): - return pooledShard{}, false, ctx.Err() + return hashedShard{}, false, ctx.Err() default: } @@ -705,7 +683,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora return result, found, nil } -func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) { +func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) { e.mtx.RLock() defer e.mtx.RUnlock() @@ -735,18 +713,15 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) // We must have all shards, to have correct information about their // indexes in a sorted slice and set appropriate marks in the metabase. // Evacuated shard is skipped during put. - shards := make([]pooledShard, 0, len(e.shards)) + shards := make([]hashedShard, 0, len(e.shards)) for id := range e.shards { - shards = append(shards, pooledShard{ - hashedShard: hashedShard(e.shards[id]), - pool: e.shardPools[id], - }) + shards = append(shards, e.shards[id]) } return shards, nil } func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, + getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") defer span.End() @@ -770,8 +745,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI res.objFailed.Add(1) return nil } - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) return err } @@ -792,16 +766,14 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) if err != nil { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) return err } if moved { res.objEvacuated.Add(1) } else if prm.IgnoreErrors { res.objFailed.Add(1) - e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) } else { 
return fmt.Errorf("object %s was not replicated", addr) } @@ -819,7 +791,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool { } func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, + shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, ) (bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString())) for j := range shards { @@ -832,15 +804,14 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { continue } - switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status { + switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status { case putToShardSuccess: res.objEvacuated.Add(1) e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard, zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr), - evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) return true, nil case putToShardExists, putToShardRemoved: res.objSkipped.Add(1) diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index 1e6b9ccb1..b75e8686d 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -3,6 +3,7 @@ package engine import ( "context" "fmt" + "slices" "sync" "time" @@ -94,8 +95,7 @@ func (s *EvacuationState) StartedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.startedAt == defaultTime { + if s.startedAt.IsZero() { return nil } return &s.startedAt @@ -105,8 +105,7 @@ func (s *EvacuationState) FinishedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.finishedAt == defaultTime { + if s.finishedAt.IsZero() { return nil } return &s.finishedAt @@ -123,8 +122,7 @@ func (s *EvacuationState) DeepCopy() *EvacuationState { if s == nil { return nil } - shardIDs := make([]string, len(s.shardIDs)) - copy(shardIDs, s.shardIDs) + shardIDs := slices.Clone(s.shardIDs) return &EvacuationState{ shardIDs: shardIDs, @@ -141,7 +139,8 @@ type evacuationLimiter struct { eg *errgroup.Group cancel context.CancelFunc - guard sync.RWMutex + guard *sync.RWMutex + statusCond *sync.Cond // used in unit tests } func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) { @@ -167,6 +166,7 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res startedAt: time.Now().UTC(), result: result, } + l.statusCond.Broadcast() return l.eg, egCtx, nil } @@ -182,6 +182,7 @@ func (l *evacuationLimiter) Complete(err error) { l.state.processState = EvacuateProcessStateCompleted l.state.errMessage = errMsq l.state.finishedAt = time.Now().UTC() + l.statusCond.Broadcast() l.eg = nil } @@ -216,6 +217,7 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error { l.state = EvacuationState{} l.eg = nil l.cancel = nil + l.statusCond.Broadcast() return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index beab8384e..f2ba7d994 
100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -37,7 +37,7 @@ type containerStorage struct { latency time.Duration } -func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) { time.Sleep(cs.latency) v, ok := cs.cntmap[id] if !ok { @@ -49,7 +49,7 @@ func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) { return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } @@ -140,16 +140,17 @@ func TestEvacuateShardObjects(t *testing.T) { prm.Scope = EvacuateScopeObjects t.Run("must be read-only", func(t *testing.T) { - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.ErrorIs(t, err, ErrMustBeReadOnly) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) }) require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.NoError(t, err) - require.Equal(t, uint64(objPerShard), res.ObjectsEvacuated()) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated()) // We check that all objects are available both before and after shard removal. // First case is a real-world use-case. It ensures that an object can be put in presence @@ -186,20 +187,30 @@ func TestEvacuateShardObjects(t *testing.T) { } // Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
- res, err = e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st = testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(0), st.ObjectsEvacuated()) checkHasObjects(t) e.mtx.Lock() delete(e.shards, evacuateShardID) - delete(e.shardPools, evacuateShardID) e.mtx.Unlock() checkHasObjects(t) } +func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { + var st *EvacuationState + var err error + e.evacuateLimiter.waitForCompleted() + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + return st +} + func TestEvacuateObjectsNetwork(t *testing.T) { t.Parallel() @@ -242,15 +253,15 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ShardID = ids[0:1] prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.ErrorIs(t, err, errMustHaveTwoShards) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) prm.ObjectsHandler = acceptOneOf(objects, 2) - res, err = e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, uint64(2), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, uint64(2), st.ObjectsEvacuated()) }) t.Run("multiple shards, evacuate one", func(t *testing.T) { t.Parallel() @@ -267,16 +278,18 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, 2) prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, uint64(2), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, uint64(2), st.ObjectsEvacuated()) t.Run("no errors", func(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, 3) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(3), st.ObjectsEvacuated()) }) }) t.Run("multiple shards, evacuate many", func(t *testing.T) { @@ -305,16 +318,18 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, totalCount-1) prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, totalCount-1, res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, totalCount-1, st.ObjectsEvacuated()) t.Run("no errors", func(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, totalCount) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, totalCount, res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := 
testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, totalCount, st.ObjectsEvacuated()) }) }) } @@ -344,9 +359,8 @@ func TestEvacuateCancellation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - res, err := e.Evacuate(ctx, prm) + err := e.Evacuate(ctx, prm) require.ErrorContains(t, err, "context canceled") - require.Equal(t, uint64(0), res.ObjectsEvacuated()) } func TestEvacuateCancellationByError(t *testing.T) { @@ -375,8 +389,9 @@ func TestEvacuateCancellationByError(t *testing.T) { prm.ObjectWorkerCount = 2 prm.ContainerWorkerCount = 2 - _, err := e.Evacuate(context.Background(), prm) - require.ErrorContains(t, err, "test error") + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), "test error") } func TestEvacuateSingleProcess(t *testing.T) { @@ -388,8 +403,8 @@ func TestEvacuateSingleProcess(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -406,20 +421,19 @@ func TestEvacuateSingleProcess(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - res, err := e.Evacuate(egCtx, prm) - require.NoError(t, err, "first evacuation failed") - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") return nil }) eg.Go(func() error { <-running - res, err := e.Evacuate(egCtx, prm) - require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation not failed") - require.Equal(t, uint64(0), res.ObjectsEvacuated()) + require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed") close(blocker) return nil }) require.NoError(t, eg.Wait()) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, uint64(3), st.ObjectsEvacuated()) + require.Equal(t, st.ErrorMessage(), "") } func TestEvacuateObjectsAsync(t *testing.T) { @@ -431,8 +445,8 @@ func TestEvacuateObjectsAsync(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -458,9 +472,9 @@ func TestEvacuateObjectsAsync(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - res, err := e.Evacuate(egCtx, prm) - require.NoError(t, err, "first evacuation failed") - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") return nil }) @@ -483,12 +497,7 @@ func TestEvacuateObjectsAsync(t *testing.T) { close(blocker) - require.Eventually(t, func() bool { - st, err = e.GetEvacuationState(context.Background()) - return st.ProcessingStatus() == EvacuateProcessStateCompleted - }, 3*time.Second, 10*time.Millisecond, "invalid final 
state") - - require.NoError(t, err, "get final state failed") + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") require.NotNil(t, st.StartedAt(), "invalid final started at") require.NotNil(t, st.FinishedAt(), "invalid final finished at") @@ -534,14 +543,9 @@ func TestEvacuateTreesLocal(t *testing.T) { require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - res, err := e.Evacuate(context.Background(), prm) - require.NotNil(t, res, "sync evacuation result must be not nil") - require.NoError(t, err, "evacuation failed") - - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get evacuation state failed") - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count") require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count") require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") @@ -632,15 +636,9 @@ func TestEvacuateTreesRemote(t *testing.T) { require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - res, err := e.Evacuate(context.Background(), prm) - require.NotNil(t, res, "sync evacuation must return not nil") - require.NoError(t, err, "evacuation failed") + require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") + st = testWaitForEvacuationCompleted(t, e) - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get evacuation state failed") - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) - - require.NoError(t, err, "get final state failed") require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count") require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count") require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") @@ -754,11 +752,12 @@ func TestEvacuateShardObjectsRepOneOnly(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(4), res.ObjectsEvacuated()) - require.Equal(t, uint64(8), res.ObjectsSkipped()) - require.Equal(t, uint64(0), res.ObjectsFailed()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, "", st.ErrorMessage()) + require.Equal(t, uint64(4), st.ObjectsEvacuated()) + require.Equal(t, uint64(8), st.ObjectsSkipped()) + require.Equal(t, uint64(0), st.ObjectsFailed()) } func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { @@ -812,7 +811,17 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) start := time.Now() - _, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) + testWaitForEvacuationCompleted(t, e) t.Logf("evacuate took %v\n", time.Since(start)) require.NoError(t, err) } + +func (l *evacuationLimiter) waitForCompleted() { + l.guard.Lock() + defer l.guard.Unlock() + + for l.state.processState != EvacuateProcessStateCompleted { + l.statusCond.Wait() + 
} +} diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 9d2b1c1b7..7dac9eb97 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool exists := false locked := false - e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Exists(ctx, shPrm) if err != nil { if client.IsErrObjectAlreadyRemoved(err) { @@ -50,7 +50,9 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } return false - }) + }); err != nil { + return false, false, err + } if alreadyRemoved { return false, false, new(apistatus.ObjectAlreadyRemoved) diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go index 1b51c10dc..9b3c0833f 100644 --- a/pkg/local_object_storage/engine/exists_test.go +++ b/pkg/local_object_storage/engine/exists_test.go @@ -42,7 +42,7 @@ func benchmarkExists(b *testing.B, shardNum int) { for range b.N { var shPrm shard.ExistsPrm shPrm.Address = addr - shPrm.ParentAddress = oid.Address{} + shPrm.ECParentAddress = oid.Address{} ok, _, err := e.exists(context.Background(), shPrm) if err != nil || ok { b.Fatalf("%t %v", ok, err) diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index c7145889b..0694c53f3 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -79,7 +78,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return GetRes{}, err + } if it.SplitInfo != nil { return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -98,7 +99,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } - it.tryGetFromBlobstore(ctx) + if err := it.tryGetFromBlobstore(ctx); err != nil { + return GetRes{}, err + } if it.Object == nil { return GetRes{}, it.OutError @@ -106,9 +109,8 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { if it.ShardWithMeta.Shard != nil && it.MetaError != nil { e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.String("error", it.MetaError.Error()), - zap.Stringer("address", prm.addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(it.MetaError), + zap.Stringer("address", prm.addr)) } } @@ -135,8 +137,8 @@ type getShardIterator struct { ecInfoErr *objectSDK.ECInfoError } -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getShardIterator) tryGetWithMeta(ctx 
context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.ShardPrm.SetIgnoreMeta(noMeta) @@ -189,13 +191,13 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { }) } -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) { +func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already visited. return false diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index d6892f129..d436dd411 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -82,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) shPrm.SetAddress(prm.addr) shPrm.SetRaw(prm.raw) - e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold res, err := sh.Head(ctx, shPrm) if err != nil { @@ -123,7 +123,9 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) } head = res.Object() return true - }) + }); err != nil { + return HeadRes{}, err + } if head != nil { return HeadRes{head: head}, nil diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index e89a8d048..e5f7072e2 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -27,9 +26,6 @@ type InhumePrm struct { forceRemoval bool } -// InhumeRes encapsulates results of inhume operation. -type InhumeRes struct{} - // WithTarget sets a list of objects that should be inhumed and tombstone address // as the reason for inhume operation. // @@ -67,124 +63,226 @@ var errInhumeFailure = errors.New("inhume operation failed") // with that object) if WithForceRemoval option has been provided. // // Returns an error if executions are blocked (see BlockExecution). 
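Inhume, like the other engine entry points touched by this patch, now returns a bare error and routes the work through execIfNotBlocked. The guard itself is not shown in the diff; a self-contained sketch of the assumed shape (a block reason behind an RWMutex) looks like this:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// guarded mimics the assumed shape of the engine's execIfNotBlocked:
// operations run only while no block reason is set.
type guarded struct {
	mtx     sync.RWMutex
	blocked error // non-nil after a BlockExecution-style call
}

func (g *guarded) execIfNotBlocked(op func() error) error {
	g.mtx.RLock()
	defer g.mtx.RUnlock()
	if g.blocked != nil {
		return g.blocked
	}
	return op()
}

func main() {
	g := &guarded{}
	fmt.Println(g.execIfNotBlocked(func() error { return nil })) // <nil>

	g.mtx.Lock()
	g.blocked = errors.New("executions are blocked")
	g.mtx.Unlock()

	fmt.Println(g.execIfNotBlocked(func() error { return nil })) // executions are blocked
}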
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { +func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume") defer span.End() defer elapsed("Inhume", e.metrics.AddMethodDuration)() - err = e.execIfNotBlocked(func() error { - res, err = e.inhume(ctx, prm) - return err + return e.execIfNotBlocked(func() error { + return e.inhume(ctx, prm) }) - - return } -func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { +func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { + addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) + if err != nil { + return err + } + var shPrm shard.InhumePrm if prm.forceRemoval { shPrm.ForceRemoval() } - for i := range prm.addrs { - if !prm.forceRemoval { - locked, err := e.IsLocked(ctx, prm.addrs[i]) - if err != nil { - e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, - zap.Error(err), - zap.Stringer("addr", prm.addrs[i]), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } else if locked { - return InhumeRes{}, new(apistatus.ObjectLocked) - } - } - + for shardID, addrs := range addrsPerShard { if prm.tombstone != nil { - shPrm.SetTarget(*prm.tombstone, prm.addrs[i]) + shPrm.SetTarget(*prm.tombstone, addrs...) } else { - shPrm.MarkAsGarbage(prm.addrs[i]) + shPrm.MarkAsGarbage(addrs...) } - ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true) - if err != nil { - return InhumeRes{}, err + sh, exists := e.shards[shardID] + if !exists { + e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard, + zap.Error(errors.New("this shard was expected to exist")), + zap.String("shard_id", shardID), + ) + return errInhumeFailure } - if !ok { - ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false) - if err != nil { - return InhumeRes{}, err - } else if !ok { - return InhumeRes{}, errInhumeFailure - } + + if _, err := sh.Inhume(ctx, shPrm); err != nil { + e.reportInhumeError(ctx, err, sh) + return err } } - return InhumeRes{}, nil + return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm) } -// Returns ok if object was inhumed during this invocation or before. -func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) { - root := false - var existPrm shard.ExistsPrm - var retErr error - var ok bool +func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) { + if err == nil { + return + } - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { - defer func() { - // if object is root we continue since information about it - // can be presented in other shards - if checkExists && root { - stop = false - } - }() + var errLocked *apistatus.ObjectLocked + switch { + case errors.As(err, &errLocked): + case errors.Is(err, shard.ErrLockObjectRemoval): + case errors.Is(err, shard.ErrReadOnlyMode): + case errors.Is(err, shard.ErrDegradedMode): + default: + e.reportShardError(ctx, hs, "couldn't inhume object in shard", err) + } +} - if checkExists { - existPrm.Address = addr - exRes, err := sh.Exists(ctx, existPrm) - if err != nil { - if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) { - // inhumed once - no need to be inhumed again - ok = true - return true - } + +// inhumeNotFoundObjects removes objects that are not found on any shard.
+// +// Even if an object is not found on any shard, it is still important to +// remove it in order to populate the metabase indexes, because they are +// responsible for the correct object status: without the indexes the status +// will be `object not found`, while with the indexes it will be `object is +// already removed`. +// +// It is suggested to evenly remove those objects on each shard with the batch +// size equal to 1 + floor(number of objects / number of shards). +func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error { + if len(addrs) == 0 { + return nil + } - var siErr *objectSDK.SplitInfoError - var ecErr *objectSDK.ECInfoError - if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) { - e.reportShardError(ctx, sh, "could not check for presents in shard", err, zap.Stringer("address", addr)) - return - } + var shPrm shard.InhumePrm + if prm.forceRemoval { + shPrm.ForceRemoval() + } - root = true - } else if !exRes.Exists() { - return - } + numObjectsPerShard := 1 + len(addrs)/len(e.shards) + + var inhumeErr error + itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { + numObjects := min(numObjectsPerShard, len(addrs)) + + if numObjects == 0 { + return true } - _, err := sh.Inhume(ctx, prm) + if prm.tombstone != nil { + shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...) + } else { + shPrm.MarkAsGarbage(addrs[:numObjects]...) + } + addrs = addrs[numObjects:] + + _, inhumeErr = hs.Inhume(ctx, shPrm) + e.reportInhumeError(ctx, inhumeErr, hs) + return inhumeErr != nil + }) + if inhumeErr != nil { + return inhumeErr + } + return itErr +} + +// groupObjectsByShard groups objects based on the shard(s) they are stored on. +// +// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of +// the objects are locked. +// +// Returns two sets of objects: found objects, which are grouped per shard, and +// not found objects, i.e., objects that are not found on any shard. This can +// happen if a node is a container node but doesn't participate in a replica +// group of the object. +func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) { + groups = make(map[string][]oid.Address) + + var ids []string + for _, addr := range addrs { + ids, err = e.findShards(ctx, addr, checkLocked) if err != nil { - var errLocked *apistatus.ObjectLocked - switch { - case errors.As(err, &errLocked): + return + } + + if len(ids) == 0 { + notFoundObjects = append(notFoundObjects, addr) + continue + } + + for _, id := range ids { + groups[id] = append(groups[id], addr) + } + } + + return +} + +// findShards determines the shard(s) where the object is stored. +// +// If the object is a root object, multiple shards will be returned. +// +// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of +// the objects are locked.
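A worked example of the batch rule documented for inhumeNotFoundObjects above: with 10 not-found addresses and 4 shards the batch size is 1 + 10/4 = 3, so successive shards receive 3, 3, 3 and 1 addresses. The slice-advance below mirrors the loop body (integer stand-ins replace oid.Address):

package main

import "fmt"

func main() {
	addrs := make([]int, 10) // stand-ins for not-found oid.Address values
	numShards := 4

	batch := 1 + len(addrs)/numShards // 1 + floor(10/4) = 3
	for shard := 0; len(addrs) > 0; shard++ {
		n := min(batch, len(addrs))
		fmt.Printf("shard %d inhumes %d objects\n", shard, n)
		addrs = addrs[n:]
	}
}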
+func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) { + var ( + ids []string + retErr error + + prm shard.ExistsPrm + + siErr *objectSDK.SplitInfoError + ecErr *objectSDK.ECInfoError + + isRootObject bool + objectExists bool + ) + + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + objectExists = false + + prm.Address = addr + switch res, err := sh.Exists(ctx, prm); { + case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err): + // NOTE(@a-savchuk): there were some considerations that we can stop + // immediately if the object is already removed or expired. However, + // the previous method behavior was: + // - keep iterating if it's a root object and already removed, + // - stop iterating if it's not a root object and removed. + // + // Since my task was only improving method speed, let's keep the + // previous method behavior. Continue if it's a root object. + return !isRootObject + case errors.As(err, &siErr) || errors.As(err, &ecErr): + isRootObject = true + objectExists = true + case err != nil: + e.reportShardError( + ctx, sh, "couldn't check for presence in shard", + err, zap.Stringer("address", addr), + ) + case res.Exists(): + objectExists = true + default: + } + + if checkLocked { + if isLocked, err := sh.IsLocked(ctx, addr); err != nil { + e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, + zap.Error(err), + zap.Stringer("address", addr), + ) + } else if isLocked { retErr = new(apistatus.ObjectLocked) return true - case errors.Is(err, shard.ErrLockObjectRemoval): - retErr = meta.ErrLockObjectRemoval - return true - case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode): - retErr = err - return true } - - e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", addr)) - return false } - ok = true - return true - }) + // This exit point must come after checking if the object is locked, + // since the locked index may be populated even if the object doesn't + // exist. + if !objectExists { + return + } - return ok, retErr + ids = append(ids, sh.ID().String()) + + // Continue if it's a root object. + return !isRootObject + }); err != nil { + return nil, err + } + + if retErr != nil { + return nil, retErr + } + return ids, nil } // IsLocked checks whether an object is locked according to StorageEngine's state. @@ -199,17 +297,18 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var err error var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locked, err = h.Shard.IsLocked(ctx, addr) + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { + locked, err = h.IsLocked(ctx, addr) if err != nil { - e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) outErr = err return false } return locked - }) + }); err != nil { + return false, err + } if locked { return locked, nil @@ -218,48 +317,52 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e return locked, outErr } -// GetLocked return lock id's if object is locked according to StorageEngine's state. 
-func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocked", +// GetLocks returns lock IDs if the object is locked according to StorageEngine's state. +func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks", trace.WithAttributes( attribute.String("address", addr.EncodeToString()), )) defer span.End() - var locked []oid.ID + var allLocks []oid.ID var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - ld, err := h.Shard.GetLocked(ctx, addr) + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { + locks, err := h.GetLocks(ctx, addr) if err != nil { - e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) outErr = err } - locked = append(locked, ld...) + allLocks = append(allLocks, locks...) return false - }) - if len(locked) > 0 { - return locked, nil + }); err != nil { + return nil, err } - return locked, outErr + if len(allLocks) > 0 { + return allLocks, nil + } + return allLocks, outErr } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) return true default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) + } } func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredLocks(ctx, epoch, lockers) select { @@ -269,11 +372,13 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) + } } func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleDeletedLocks(ctx, lockers) select { @@ -283,26 +388,25 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) + } } func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerSizePrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -314,7 +418,7 @@ func (e *StorageEngine)
processZeroSizeContainers(ctx context.Context, ids []cid var drop []cid.ID for id := range idMap { prm.SetContainerID(id) - s, err := sh.ContainerSize(prm) + s, err := sh.ContainerSize(ctx, prm) if err != nil { e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true @@ -329,13 +433,15 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -353,12 +459,13 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerSize(id.EncodeToString()) } @@ -368,19 +475,16 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerCountPrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -407,13 +511,15 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -431,12 +537,13 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerCount(id.EncodeToString()) } diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 6980afb07..0e268cd23 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -2,14 +2,24 @@ package engine import ( "context" + "fmt" + "strconv" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) func TestStorageEngine_Inhume(t *testing.T) { @@ -46,7 +56,7 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) addrs, err := Select(context.Background(), e, cnr, false, fs) @@ -76,7 +86,7 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) addrs, err := Select(context.Background(), e, cnr, false, fs) @@ -84,3 +94,249 @@ func TestStorageEngine_Inhume(t *testing.T) { require.Empty(t, addrs) }) } + +func TestStorageEngine_ECInhume(t *testing.T) { + parentObjectAddress := oidtest.Address() + containerID := parentObjectAddress.Container() + + chunkObject0 := testutil.GenerateObjectWithCID(containerID) + chunkObject0.SetECHeader(objectSDK.NewECHeader( + objectSDK.ECParentInfo{ + ID: parentObjectAddress.Object(), + }, 0, 4, []byte{}, 0)) + + chunkObject1 := testutil.GenerateObjectWithCID(containerID) + chunkObject1.SetECHeader(objectSDK.NewECHeader( + objectSDK.ECParentInfo{ + ID: parentObjectAddress.Object(), + }, 1, 4, []byte{}, 0)) + + tombstone := objectSDK.NewTombstone() + tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()}) + payload, err := tombstone.Marshal() + require.NoError(t, err) + tombstoneObject := testutil.GenerateObjectWithCID(containerID) + tombstoneObject.SetType(objectSDK.TypeTombstone) + tombstoneObject.SetPayload(payload) + tombstoneObjectAddress := object.AddressOf(tombstoneObject) + + e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() + + require.NoError(t, Put(context.Background(), e, chunkObject0, false)) + + require.NoError(t, Put(context.Background(), e, tombstoneObject, false)) + + var inhumePrm InhumePrm + inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress) + err = e.Inhume(context.Background(), inhumePrm) + require.NoError(t, err) + + var alreadyRemoved *apistatus.ObjectAlreadyRemoved + + require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved) + + require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved) +} + +func TestInhumeExpiredRegularObject(t *testing.T) { + t.Parallel() + + const currEpoch = 42 + const objectExpiresAfter = currEpoch - 1 + + engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{ + shard.WithDisabledGC(), + shard.WithMetaBaseOptions(append( + testGetDefaultMetabaseOptions(t), + meta.WithEpochState(epochState{currEpoch}), + )...), + } + }).prepare(t).engine + + cnr := cidtest.ID() + + generateAndPutObject := func() *objectSDK.Object { + obj := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) + + var putPrm PutPrm + putPrm.Object = obj + require.NoError(t, engine.Put(context.Background(), putPrm)) + return obj + } + 
+ t.Run("inhume with tombstone", func(t *testing.T) { + obj := generateAndPutObject() + ts := oidtest.Address() + ts.SetContainer(cnr) + + var prm InhumePrm + prm.WithTarget(ts, object.AddressOf(obj)) + err := engine.Inhume(context.Background(), prm) + require.NoError(t, err) + }) + + t.Run("inhume without tombstone", func(t *testing.T) { + obj := generateAndPutObject() + + var prm InhumePrm + prm.MarkAsGarbage(object.AddressOf(obj)) + err := engine.Inhume(context.Background(), prm) + require.NoError(t, err) + }) +} + +func BenchmarkInhumeMultipart(b *testing.B) { + // The benchmark result insignificantly depends on the number of shards, + // so do not use it as a benchmark parameter, just set it big enough. + numShards := 100 + + for numObjects := 1; numObjects <= 10000; numObjects *= 10 { + b.Run( + fmt.Sprintf("objects=%d", numObjects), + func(b *testing.B) { + benchmarkInhumeMultipart(b, numShards, numObjects) + }, + ) + } +} + +func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { + b.StopTimer() + + engine := testNewEngine(b). + setShardsNum(b, numShards).prepare(b).engine + defer func() { require.NoError(b, engine.Close(context.Background())) }() + + cnt := cidtest.ID() + eg := errgroup.Group{} + + for range b.N { + addrs := make([]oid.Address, numObjects) + + for i := range numObjects { + prm := PutPrm{} + + prm.Object = objecttest.Object().Parent() + prm.Object.SetContainerID(cnt) + prm.Object.SetType(objectSDK.TypeRegular) + + addrs[i] = object.AddressOf(prm.Object) + + eg.Go(func() error { + return engine.Put(context.Background(), prm) + }) + } + require.NoError(b, eg.Wait()) + + ts := oidtest.Address() + ts.SetContainer(cnt) + + prm := InhumePrm{} + prm.WithTarget(ts, addrs...) + + b.StartTimer() + err := engine.Inhume(context.Background(), prm) + require.NoError(b, err) + b.StopTimer() + } +} + +func TestInhumeIfObjectDoesntExist(t *testing.T) { + const numShards = 4 + + engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine + t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) + + t.Run("inhume without tombstone", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, false, false) + }) + t.Run("inhume with tombstone", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, true, false) + }) + t.Run("force inhume", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, false, true) + }) + + t.Run("object is locked", func(t *testing.T) { + t.Run("inhume without tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, engine, false, false) + }) + t.Run("inhume with tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, engine, true, false) + }) + t.Run("force inhume", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, engine, false, true) + }) + }) +} + +func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { + t.Parallel() + + object := oidtest.Address() + require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce)) + + err := testHeadObject(e, object) + if withTombstone { + require.True(t, client.IsErrObjectAlreadyRemoved(err)) + } else { + require.True(t, client.IsErrObjectNotFound(err)) + } +} + +func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { + t.Parallel() + + object := oidtest.Address() + require.NoError(t, testLockObject(e, object)) + + err := testInhumeObject(t, e, object, withTombstone, withForce) + if !withForce { + var 
errLocked *apistatus.ObjectLocked + require.ErrorAs(t, err, &errLocked) + return + } + require.NoError(t, err) + + err = testHeadObject(e, object) + if withTombstone { + require.True(t, client.IsErrObjectAlreadyRemoved(err)) + } else { + require.True(t, client.IsErrObjectNotFound(err)) + } +} + +func testLockObject(e *StorageEngine, obj oid.Address) error { + return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()}) +} + +func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error { + tombstone := oidtest.Address() + tombstone.SetContainer(obj.Container()) + + // Due to the test design it is possible to set both options; however, + // removal with a tombstone and force removal are mutually exclusive. + require.False(t, withTombstone && withForce) + + var inhumePrm InhumePrm + if withTombstone { + inhumePrm.WithTarget(tombstone, obj) + } else { + inhumePrm.MarkAsGarbage(obj) + } + if withForce { + inhumePrm.WithForceRemoval() + } + return e.Inhume(context.Background(), inhumePrm) +} + +func testHeadObject(e *StorageEngine, obj oid.Address) error { + var headPrm HeadPrm + headPrm.WithAddress(obj) + + _, err := e.Head(context.Background(), headPrm) + return err +} diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 5d43e59df..3b0cf74f9 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -41,11 +41,19 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { + st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: - switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { + st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: @@ -61,13 +69,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { // code is pretty similar to inhumeAddr, maybe unify?
root := false var addrLocked oid.Address addrLocked.SetContainer(idCnr) addrLocked.SetObject(locked) - e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) { + retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { defer func() { // if object is root we continue since information about it // can be presented in other shards @@ -84,17 +92,11 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError if errors.As(err, &eiErr) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err = objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return false - } - eclocked = append(eclocked, objID) + eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) + if !ok { + return false } + err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), @@ -137,3 +139,18 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo }) return } + +func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { + eclocked := []oid.ID{locked} + for _, chunk := range eiErr.ECInfo().Chunks { + var objID oid.ID + err := objID.ReadFromV2(chunk.ID) + if err != nil { + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) + return nil, false + } + eclocked = append(eclocked, objID) + } + return eclocked, true +} diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index feca9cb69..b8c9d6b1d 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -114,7 +114,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -127,7 +127,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombForLockAddr, lockerAddr) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorIs(t, err, meta.ErrLockObjectRemoval) // 5. @@ -136,7 +136,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) require.Eventually(t, func() bool { - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -200,7 +200,7 @@ func TestLockExpiration(t *testing.T) { inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 3. 
@@ -212,7 +212,7 @@ func TestLockExpiration(t *testing.T) { inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) require.Eventually(t, func() bool { - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -270,12 +270,12 @@ func TestLockForceRemoval(t *testing.T) { inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -283,12 +283,64 @@ func TestLockForceRemoval(t *testing.T) { deletePrm.WithAddress(objectcore.AddressOf(lock)) deletePrm.WithForceRemoval() - _, err = e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) // 5. inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) } + +func TestLockExpiredRegularObject(t *testing.T) { + const currEpoch = 42 + const objectExpiresAfter = currEpoch - 1 + + engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{ + shard.WithDisabledGC(), + shard.WithMetaBaseOptions(append( + testGetDefaultMetabaseOptions(t), + meta.WithEpochState(epochState{currEpoch}), + )...), + } + }).prepare(t).engine + + cnr := cidtest.ID() + + object := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) + + address := objectcore.AddressOf(object) + + var putPrm PutPrm + putPrm.Object = object + require.NoError(t, engine.Put(context.Background(), putPrm)) + + var getPrm GetPrm + var errNotFound *apistatus.ObjectNotFound + + getPrm.WithAddress(address) + _, err := engine.Get(context.Background(), getPrm) + require.ErrorAs(t, err, &errNotFound) + + t.Run("lock expired regular object", func(t *testing.T) { + engine.Lock(context.Background(), + address.Container(), + oidtest.ID(), + []oid.ID{address.Object()}, + ) + + res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object)) + require.NoError(t, err) + require.True(t, res) + }) + + t.Run("get expired and locked regular object", func(t *testing.T) { + getPrm.WithAddress(objectcore.AddressOf(object)) + + res, err := engine.Get(context.Background(), getPrm) + require.NoError(t, err) + require.Equal(t, res.Object(), object) + }) +} diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go index 75936206d..963292d83 100644 --- a/pkg/local_object_storage/engine/metrics.go +++ b/pkg/local_object_storage/engine/metrics.go @@ -7,34 +7,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) -type MetricRegister interface { - AddMethodDuration(method string, d time.Duration) - - SetObjectCounter(shardID, objectType string, v uint64) - AddToObjectCounter(shardID, objectType string, delta int) - - SetMode(shardID string, mode mode.Mode) - - AddToContainerSize(cnrID string, size int64) - DeleteContainerSize(cnrID string) - DeleteContainerCount(cnrID string) - AddToPayloadCounter(shardID string, size int64) 
- IncErrorCounter(shardID string) - ClearErrorCounter(shardID string) - DeleteShardMetrics(shardID string) - - SetContainerObjectCounter(shardID, contID, objectType string, v uint64) - IncContainerObjectCounter(shardID, contID, objectType string) - SubContainerObjectCounter(shardID, contID, objectType string, v uint64) - - IncRefillObjectsCount(shardID, path string, size int, success bool) - SetRefillPercent(shardID, path string, percent uint32) - SetRefillStatus(shardID, path, status string) - SetEvacuationInProgress(shardID string, value bool) - - WriteCache() metrics.WriteCacheMetrics - GC() metrics.GCMetrics -} +type ( + MetricRegister = metrics.EngineMetrics + GCMetrics = metrics.GCMetrics + WriteCacheMetrics = metrics.WriteCacheMetrics + NullBool = metrics.NullBool +) func elapsed(method string, addFunc func(method string, d time.Duration)) func() { t := time.Now() @@ -76,9 +54,9 @@ type ( ) var ( - _ MetricRegister = noopMetrics{} - _ metrics.WriteCacheMetrics = noopWriteCacheMetrics{} - _ metrics.GCMetrics = noopGCMetrics{} + _ MetricRegister = noopMetrics{} + _ WriteCacheMetrics = noopWriteCacheMetrics{} + _ GCMetrics = noopGCMetrics{} ) func (noopMetrics) AddMethodDuration(string, time.Duration) {} @@ -99,8 +77,8 @@ func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {} func (noopMetrics) SetRefillPercent(string, string, uint32) {} func (noopMetrics) SetRefillStatus(string, string, string) {} func (noopMetrics) SetEvacuationInProgress(string, bool) {} -func (noopMetrics) WriteCache() metrics.WriteCacheMetrics { return noopWriteCacheMetrics{} } -func (noopMetrics) GC() metrics.GCMetrics { return noopGCMetrics{} } +func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} } +func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} } func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {} func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {} diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index e080191ae..10cf5ffd5 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -9,8 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -71,21 +69,21 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // In #1146 this check was parallelized, however, it became // much slower on fast machines for 4 shards. 
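The put.go hunks below also drop the per-shard worker pool: putToShard used to be submitted to a pool and awaited through an exit channel, and now runs inline. A minimal before/after sketch of that pattern (the one-method pool interface is a stand-in assumption matching the Submit call in the removed lines):

package main

import "fmt"

// Old shape: run op through a pool and block until it finishes.
type workerPool interface{ Submit(task func()) error }

func viaPool(pool workerPool, op func()) error {
	done := make(chan struct{})
	if err := pool.Submit(func() {
		defer close(done)
		op()
	}); err != nil {
		return err // the pool rejected the task
	}
	<-done
	return nil
}

// New shape: the indirection is gone, the operation is a direct call.
func inline(op func()) { op() }

// goPool is a trivial pool that runs each task in its own goroutine.
type goPool struct{}

func (goPool) Submit(task func()) error { go task(); return nil }

func main() {
	_ = viaPool(goPool{}, func() { fmt.Println("via pool") })
	inline(func() { fmt.Println("inline") })
}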
- var parent oid.Address + var ecParent oid.Address if prm.Object.ECHeader() != nil { - parent.SetObject(prm.Object.ECHeader().Parent()) - parent.SetContainer(addr.Container()) + ecParent.SetObject(prm.Object.ECHeader().Parent()) + ecParent.SetContainer(addr.Container()) } var shPrm shard.ExistsPrm shPrm.Address = addr - shPrm.ParentAddress = parent + shPrm.ECParentAddress = ecParent existed, locked, err := e.exists(ctx, shPrm) if err != nil { return err } if !existed && locked { - lockers, err := e.GetLocked(ctx, parent) + lockers, err := e.GetLocks(ctx, ecParent) if err != nil { return err } @@ -98,17 +96,19 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } var shRes putToShardRes - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() - pool, ok := e.shardPools[sh.ID().String()] + _, ok := e.shards[sh.ID().String()] e.mtx.RUnlock() if !ok { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer) + shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown - }) + }); err != nil { + return err + } switch shRes.status { case putToShardUnknown: return errPutShard @@ -123,73 +123,59 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // putToShard puts object to sh. // Return putToShardStatus and error if it is necessary to propagate an error upper. -func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, +func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, ) (res putToShardRes) { - exitCh := make(chan struct{}) + var existPrm shard.ExistsPrm + existPrm.Address = addr - if err := pool.Submit(func() { - defer close(exitCh) - - var existPrm shard.ExistsPrm - existPrm.Address = addr - - exists, err := sh.Exists(ctx, existPrm) - if err != nil { - if shard.IsErrObjectExpired(err) { - // object is already found but - // expired => do nothing with it - res.status = putToShardExists - } else { - e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } - - return // this is not ErrAlreadyRemoved error so we can go to the next shard - } - - if exists.Exists() { + exists, err := sh.Exists(ctx, existPrm) + if err != nil { + if shard.IsErrObjectExpired(err) { + // object is already found but + // expired => do nothing with it res.status = putToShardExists - return + } else { + e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) } - var putPrm shard.PutPrm - putPrm.SetObject(obj) - putPrm.SetIndexAttributes(isIndexedContainer) - - _, err = sh.Put(ctx, putPrm) - if err != nil { - if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || - errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - return - } - if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", 
err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - res.status = putToShardRemoved - res.err = err - return - } - - e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) - return - } - - res.status = putToShardSuccess - }); err != nil { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err)) - close(exitCh) + return // this is not ErrAlreadyRemoved error so we can go to the next shard } - <-exitCh + if exists.Exists() { + res.status = putToShardExists + return + } + + var putPrm shard.PutPrm + putPrm.SetObject(obj) + putPrm.SetIndexAttributes(isIndexedContainer) + + _, err = sh.Put(ctx, putPrm) + if err != nil { + if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || + errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + return + } + if client.IsErrObjectAlreadyRemoved(err) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + res.status = putToShardRemoved + res.err = err + return + } + + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) + return + } + + res.status = putToShardSuccess return } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 0c9cea903..7ec4742d8 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -94,7 +93,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return RngRes{}, err + } if it.SplitInfo != nil { return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -110,7 +111,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } - it.tryGetFromBlobstor(ctx) + if err := it.tryGetFromBlobstor(ctx); err != nil { + return RngRes{}, err + } if it.Object == nil { return RngRes{}, it.OutError @@ -118,9 +121,8 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error if it.ShardWithMeta.Shard != nil && it.MetaError != nil { e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.String("error", it.MetaError.Error()), - zap.Stringer("address", prm.addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(it.MetaError), + zap.Stringer("address", prm.addr)) } } @@ -159,8 +161,8 @@ type getRangeShardIterator struct { Engine *StorageEngine } -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, 
func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.HasDegraded = i.HasDegraded || noMeta i.ShardPrm.SetIgnoreMeta(noMeta) @@ -211,13 +213,13 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { }) } -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) { +func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already processed it without a metabase. return false diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go index 83c6a54ed..a29dd7ed9 100644 --- a/pkg/local_object_storage/engine/rebuild.go +++ b/pkg/local_object_storage/engine/rebuild.go @@ -4,6 +4,7 @@ import ( "context" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" @@ -41,7 +42,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } resGuard := &sync.Mutex{} - limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit) + concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)} eg, egCtx := errgroup.WithContext(ctx) for _, shardID := range prm.ShardIDs { @@ -61,7 +62,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{ - ConcurrencyLimiter: limiter, + ConcurrencyLimiter: concLimiter, TargetFillPercent: prm.TargetFillPercent, }) @@ -88,3 +89,20 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } return res, nil } + +type concurrencyLimiter struct { + semaphore chan struct{} +} + +func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + select { + case l.semaphore <- struct{}{}: + return l.releaseWorkSlot, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (l *concurrencyLimiter) releaseWorkSlot() { + <-l.semaphore +} diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 02149b4c8..4243a5481 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -54,8 +54,9 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e._select(ctx, prm) - return err + var sErr error + res, sErr = e._select(ctx, prm) + return sErr }) return @@ -65,13 +66,11 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) - var outError error - var shPrm shard.SelectPrm shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, 
err := sh.Select(ctx, shPrm) if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -86,11 +85,13 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, - }, outError + }, nil } // List returns `limit` available physically storage object addresses in engine. @@ -100,8 +101,9 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.list(ctx, limit) - return err + var lErr error + res, lErr = e.list(ctx, limit) + return lErr }) return @@ -113,7 +115,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro ln := uint64(0) // consider iterating over shuffled shards - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -132,7 +134,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 898f685ec..69067c500 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -11,10 +11,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/hrw" "github.com/google/uuid" - "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" ) @@ -108,15 +110,15 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) { func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) { sh, err := e.createShard(ctx, opts) if err != nil { - return nil, fmt.Errorf("could not create a shard: %w", err) + return nil, fmt.Errorf("create a shard: %w", err) } err = e.addShard(sh) if err != nil { - return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err) + return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } - e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) + e.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } @@ -124,7 +126,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) { id, err := generateShardID() if err != nil { - return nil, fmt.Errorf("could not generate shard ID: %w", err) + return nil, fmt.Errorf("generate shard ID: %w", err) } opts = e.appendMetrics(id, opts) @@ -178,11 +180,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { 
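The engine's shard iterators now take a context and report cancellation as an error instead of silently stopping, which is why every call site above gained an "if err := ...; err != nil" wrapper. A minimal, self-contained sketch of the pattern; the generic helper below is illustrative, not the engine's actual API:

package main

import (
	"context"
	"fmt"
)

// iterateWithContext visits items in order, checking for cancellation
// before each step; the handler returns true to stop early.
func iterateWithContext[T any](ctx context.Context, items []T, handler func(T) (stop bool)) error {
	for _, item := range items {
		select {
		case <-ctx.Done():
			return ctx.Err() // surface cancellation to the caller
		default:
		}
		if handler(item) {
			break
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	shards := []string{"shard-1", "shard-2", "shard-3"}
	err := iterateWithContext(ctx, shards, func(id string) bool {
		fmt.Println("visiting", id)
		cancel() // simulate cancellation mid-walk
		return false
	})
	fmt.Println("iteration finished with:", err) // context.Canceled
}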
e.mtx.Lock() defer e.mtx.Unlock() - pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true)) - if err != nil { - return fmt.Errorf("could not create pool: %w", err) - } - strID := sh.ID().String() if _, ok := e.shards[strID]; ok { return fmt.Errorf("shard with id %s was already added", strID) @@ -196,8 +193,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { hash: hrw.StringHash(strID), } - e.shardPools[strID] = pool - return nil } @@ -222,12 +217,6 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { ss = append(ss, sh) delete(e.shards, id) - pool, ok := e.shardPools[id] - if ok { - pool.Release() - delete(e.shardPools, id) - } - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", id)) } @@ -272,7 +261,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) h := hrw.StringHash(objAddr.EncodeToString()) shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, hashedShard(sh)) + shards = append(shards, sh) } hrw.SortHasherSliceByValue(shards, h) return shards @@ -285,26 +274,38 @@ func (e *StorageEngine) unsortedShards() []hashedShard { shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, hashedShard(sh)) + shards = append(shards, sh) } return shards } -func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { for i, sh := range e.sortShards(addr) { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(i, sh) { break } } + return nil } -func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { for _, sh := range e.unsortedShards() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(sh) { break } } + return nil } // SetShardMode sets mode of the shard with provided identifier. @@ -329,8 +330,6 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M // HandleNewEpoch notifies every shard about NewEpoch event. 
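sortShards above ranks shards for an object address with rendezvous (HRW) hashing from the TrueCloudLab/hrw package, using each shard's precomputed hash. A rough standalone sketch of the idea, substituting FNV for the package's real hash functions purely to keep the example dependency-free:

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// score combines a shard's identity with the object key, so each object
// gets its own, stable preference order over shards.
func score(shardID, objKey string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(shardID))
	h.Write([]byte(objKey))
	return h.Sum64()
}

// sortByRendezvous orders shard IDs by descending score for the key;
// adding or removing one shard reshuffles only that shard's objects.
func sortByRendezvous(shardIDs []string, objKey string) []string {
	out := append([]string(nil), shardIDs...)
	sort.Slice(out, func(i, j int) bool {
		return score(out[i], objKey) > score(out[j], objKey)
	})
	return out
}

func main() {
	shards := []string{"a", "b", "c"}
	fmt.Println(sortByRendezvous(shards, "container/object-1"))
	fmt.Println(sortByRendezvous(shards, "container/object-2"))
}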
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { - ev := shard.EventNewEpoch(epoch) - e.mtx.RLock() defer e.mtx.RUnlock() @@ -338,7 +337,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { select { case <-ctx.Done(): return - case sh.NotificationChannel() <- ev: + case sh.NotificationChannel() <- epoch: default: e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) @@ -374,7 +373,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS zap.Error(err), ) multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err)) + multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err)) multiErrGuard.Unlock() } @@ -385,7 +384,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS zap.Error(err), ) multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err)) + multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err)) multiErrGuard.Unlock() } return nil @@ -426,12 +425,6 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha delete(e.shards, idStr) - pool, ok := e.shardPools[idStr] - if ok { - pool.Release() - delete(e.shardPools, idStr) - } - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } @@ -442,3 +435,48 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha func (s hashedShard) Hash() uint64 { return s.hash } + +func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) { + var err error + var info []shard.Info + prm := shard.ExistsPrm{ + Address: obj, + } + var siErr *objectSDK.SplitInfoError + var ecErr *objectSDK.ECInfoError + + if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { + res, exErr := hs.Exists(ctx, prm) + if exErr != nil { + if client.IsErrObjectAlreadyRemoved(exErr) { + err = new(apistatus.ObjectAlreadyRemoved) + return true + } + + // Check if error is either SplitInfoError or ECInfoError. + // True means the object is virtual. 
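HandleNewEpoch above now sends the bare epoch number to each shard's notification channel and simply drops the event when a shard is still processing the previous epoch. A small sketch of that non-blocking fan-out; the channel layout is hypothetical:

package main

import (
	"context"
	"fmt"
)

// notifyAll delivers epoch to every channel without blocking: a shard
// that has not consumed the previous epoch simply misses this one.
func notifyAll(ctx context.Context, chans []chan uint64, epoch uint64) {
	for _, ch := range chans {
		select {
		case <-ctx.Done():
			return
		case ch <- epoch:
		default:
			fmt.Println("shard busy, skipping epoch", epoch)
		}
	}
}

func main() {
	busy := make(chan uint64)     // unbuffered and never read: a send would block
	ready := make(chan uint64, 1) // has room for exactly one pending epoch
	notifyAll(context.Background(), []chan uint64{busy, ready}, 42)
	fmt.Println("delivered:", <-ready)
}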
+ if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) { + info = append(info, hs.DumpInfo()) + return false + } + + if shard.IsErrObjectExpired(exErr) { + err = exErr + return true + } + + if !client.IsErrObjectNotFound(exErr) { + e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address)) + } + + return false + } + if res.Exists() { + info = append(info, hs.DumpInfo()) + } + return false + }); itErr != nil { + return nil, itErr + } + return info, err +} diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index 0bbc7563c..3aa9629b0 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -17,7 +17,6 @@ func TestRemoveShard(t *testing.T) { e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() - require.Equal(t, numOfShards, len(e.shardPools)) require.Equal(t, numOfShards, len(e.shards)) removedNum := numOfShards / 2 @@ -37,7 +36,6 @@ func TestRemoveShard(t *testing.T) { } } - require.Equal(t, numOfShards-removedNum, len(e.shardPools)) require.Equal(t, numOfShards-removedNum, len(e.shards)) for id, removed := range mSh { diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 268b4adfa..cfd15b4d4 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.opentelemetry.io/otel/attribute" @@ -39,8 +38,7 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return nil, err @@ -73,8 +71,7 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return nil, err } @@ -102,8 +99,7 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -130,8 +126,7 @@ func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeI if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -162,8 
+157,7 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -195,8 +189,7 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -227,8 +220,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -238,7 +230,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree } // TreeSortedByFilename implements the pilorama.Forest interface. -func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename", trace.WithAttributes( attribute.String("container_id", cid.EncodeToString()), @@ -249,7 +241,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, var err error var nodes []pilorama.MultiNodeInfo - var cursor *string + var cursor *pilorama.Cursor for _, sh := range e.sortShards(cid) { nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) if err != nil { @@ -259,8 +251,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -291,8 +282,7 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -321,8 +311,7 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -350,8 +339,7 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, } e.reportShardError(ctx, sh, "can't perform `TreeList`", err, - zap.Stringer("cid", cid), - zap.String("trace_id", 
tracingPkg.GetTraceID(ctx))) + zap.Stringer("cid", cid)) // returns as much info about // trees as possible @@ -417,8 +405,7 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -444,8 +431,7 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't read tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go index 383c596af..52b199b0b 100644 --- a/pkg/local_object_storage/internal/testutil/generators.go +++ b/pkg/local_object_storage/internal/testutil/generators.go @@ -1,7 +1,9 @@ package testutil import ( + cryptorand "crypto/rand" "encoding/binary" + "math/rand" "sync/atomic" "testing" @@ -9,7 +11,6 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" - "golang.org/x/exp/rand" ) // AddressGenerator is the interface of types that generate object addresses. @@ -61,7 +62,7 @@ var _ ObjectGenerator = &SeqObjGenerator{} func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object { data := make([]byte, sz) - _, _ = rand.Read(data) + _, _ = cryptorand.Read(data) obj := GenerateObjectWithCIDWithPayload(cid, data) obj.SetID(oid) return obj @@ -82,7 +83,7 @@ var _ ObjectGenerator = &RandObjGenerator{} func (g *RandObjGenerator) Next() *objectSDK.Object { var id oid.ID - _, _ = rand.Read(id[:]) + _, _ = cryptorand.Read(id[:]) return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) } diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go index 60e9211d5..1087e40be 100644 --- a/pkg/local_object_storage/internal/testutil/object.go +++ b/pkg/local_object_storage/internal/testutil/object.go @@ -1,6 +1,7 @@ package testutil import ( + "crypto/rand" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -11,7 +12,6 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" - "golang.org/x/exp/rand" ) const defaultDataSize = 32 diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go new file mode 100644 index 000000000..de1479e6f --- /dev/null +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -0,0 +1,82 @@ +package meta + +import ( + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "go.etcd.io/bbolt" +) + +type bucketCache struct { + locked *bbolt.Bucket + graveyard *bbolt.Bucket + garbage *bbolt.Bucket + expired map[cid.ID]*bbolt.Bucket + primary map[cid.ID]*bbolt.Bucket +} + +func newBucketCache() *bucketCache { + return &bucketCache{} +} + +func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + 
return tx.Bucket(bucketNameLocked) + } + return getBucket(&bc.locked, tx, bucketNameLocked) +} + +func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(graveyardBucketName) + } + return getBucket(&bc.graveyard, tx, graveyardBucketName) +} + +func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(garbageBucketName) + } + return getBucket(&bc.garbage, tx, garbageBucketName) +} + +func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket { + if *cache != nil { + return *cache + } + + *cache = tx.Bucket(name) + return *cache +} + +func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = objectToExpirationEpochBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr) +} + +func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = primaryBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr) +} + +func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { + value, ok := (*m)[cnr] + if ok { + return value + } + + if *m == nil { + *m = make(map[cid.ID]*bbolt.Bucket, 1) + } + + bucketName := make([]byte, bucketKeySize) + bucketName = nameFunc(cnr, bucketName) + (*m)[cnr] = getBucket(&value, tx, bucketName) + return value +} diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go index 472b2affc..da27e6085 100644 --- a/pkg/local_object_storage/metabase/containers.go +++ b/pkg/local_object_storage/metabase/containers.go @@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) { return result, err } -func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { +func (db *DB) ContainerSize(id cid.ID) (uint64, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -64,21 +64,22 @@ func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { return 0, ErrDegradedMode } - err = db.boltDB.View(func(tx *bbolt.Tx) error { - size, err = db.containerSize(tx, id) + var size uint64 + err := db.boltDB.View(func(tx *bbolt.Tx) error { + size = db.containerSize(tx, id) - return err + return nil }) return size, metaerr.Wrap(err) } -func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) { +func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 { containerVolume := tx.Bucket(containerVolumeBucketName) key := make([]byte, cidSize) id.Encode(key) - return parseContainerSize(containerVolume.Get(key)), nil + return parseContainerSize(containerVolume.Get(key)) } func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool { diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 07fa7e9cf..c19c65224 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -54,7 +54,7 @@ func (db *DB) Open(ctx context.Context, m mode.Mode) error { func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission) if err != nil { - return fmt.Errorf("can't create dir %s for metabase: %w", 
db.info.Path, err) + return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err) } db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) @@ -73,7 +73,7 @@ func (db *DB) openBolt(ctx context.Context) error { db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions) if err != nil { - return fmt.Errorf("can't open boltDB database: %w", err) + return fmt.Errorf("open boltDB database: %w", err) } db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize @@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error { if reset { err := tx.DeleteBucket(name) if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { - return fmt.Errorf("could not delete static bucket %s: %w", k, err) + return fmt.Errorf("delete static bucket %s: %w", k, err) } } _, err := tx.CreateBucketIfNotExists(name) if err != nil { - return fmt.Errorf("could not create static bucket %s: %w", k, err) + return fmt.Errorf("create static bucket %s: %w", k, err) } } for _, b := range deprecatedBuckets { err := tx.DeleteBucket(b) if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { - return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err) + return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err) } } if !reset { // counters will be recalculated by refill metabase err = syncCounter(tx, false) if err != nil { - return fmt.Errorf("could not sync object counter: %w", err) + return fmt.Errorf("sync object counter: %w", err) } return nil diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go index 3ead0d9a0..732f99519 100644 --- a/pkg/local_object_storage/metabase/counter.go +++ b/pkg/local_object_storage/metabase/counter.go @@ -238,26 +238,26 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { } if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil { - return fmt.Errorf("could not increase phy object counter: %w", err) + return fmt.Errorf("increase phy object counter: %w", err) } if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil { - return fmt.Errorf("could not increase logical object counter: %w", err) + return fmt.Errorf("increase logical object counter: %w", err) } if isUserObject { if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil { - return fmt.Errorf("could not increase user object counter: %w", err) + return fmt.Errorf("increase user object counter: %w", err) } } return db.incContainerObjectCounter(tx, cnrID, isUserObject) } -func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error { +func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error { b := tx.Bucket(shardInfoBucket) if b == nil { return nil } - return db.updateShardObjectCounterBucket(b, typ, delta, inc) + return db.updateShardObjectCounterBucket(b, typ, delta, false) } func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error { @@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject func syncCounter(tx *bbolt.Tx, force bool) error { shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket) if err != nil { - return fmt.Errorf("could not get shard info bucket: %w", err) + return fmt.Errorf("get shard info bucket: %w", err) } shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 && 
len(shardInfoB.Get(objectLogicCounterKey)) == 8 && @@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error { containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName) if err != nil { - return fmt.Errorf("could not get container counter bucket: %w", err) + return fmt.Errorf("get container counter bucket: %w", err) } var addr oid.Address @@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error { return nil }) if err != nil { - return fmt.Errorf("could not iterate objects: %w", err) + return fmt.Errorf("iterate objects: %w", err) } return setObjectCounters(counters, shardInfoB, containerCounterB) @@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container value := containerCounterValue(count) err := containerCounterB.Put(key, value) if err != nil { - return fmt.Errorf("could not update phy container object counter: %w", err) + return fmt.Errorf("update phy container object counter: %w", err) } } phyData := make([]byte, 8) @@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err := shardInfoB.Put(objectPhyCounterKey, phyData) if err != nil { - return fmt.Errorf("could not update phy object counter: %w", err) + return fmt.Errorf("update phy object counter: %w", err) } logData := make([]byte, 8) @@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err = shardInfoB.Put(objectLogicCounterKey, logData) if err != nil { - return fmt.Errorf("could not update logic object counter: %w", err) + return fmt.Errorf("update logic object counter: %w", err) } userData := make([]byte, 8) @@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err = shardInfoB.Put(objectUserCounterKey, userData) if err != nil { - return fmt.Errorf("could not update user object counter: %w", err) + return fmt.Errorf("update user object counter: %w", err) } return nil @@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) { } var cnrID cid.ID if err := cnrID.Decode(buf); err != nil { - return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err) + return cid.ID{}, fmt.Errorf("decode container ID: %w", err) } return cnrID, nil } diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 62ab1056d..9a5a6e574 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -161,28 +161,28 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error { if res.phyCount > 0 { - err := db.updateShardObjectCounter(tx, phy, res.phyCount, false) + err := db.decShardObjectCounter(tx, phy, res.phyCount) if err != nil { - return fmt.Errorf("could not decrease phy object counter: %w", err) + return fmt.Errorf("decrease phy object counter: %w", err) } } if res.logicCount > 0 { - err := db.updateShardObjectCounter(tx, logical, res.logicCount, false) + err := db.decShardObjectCounter(tx, logical, res.logicCount) if err != nil { - return fmt.Errorf("could not decrease logical object counter: %w", err) + return fmt.Errorf("decrease logical object counter: %w", err) } } if res.userCount > 0 { - err := db.updateShardObjectCounter(tx, user, res.userCount, false) + err := db.decShardObjectCounter(tx, user, res.userCount) if err != nil { - return fmt.Errorf("could not decrease user object counter: %w", err) + 
return fmt.Errorf("decrease user object counter: %w", err) } } if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil { - return fmt.Errorf("could not decrease container object counter: %w", err) + return fmt.Errorf("decrease container object counter: %w", err) } return nil } @@ -259,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter if garbageBKT != nil { err := garbageBKT.Delete(addrKey) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err) } } return deleteSingleResult{}, nil @@ -280,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter if garbageBKT != nil { err := garbageBKT.Delete(addrKey) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err) } } @@ -308,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter // remove object err = db.deleteObject(tx, obj, false) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove object: %w", err) } if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil { @@ -335,12 +335,12 @@ func (db *DB) deleteObject( err = updateListIndexes(tx, obj, delListIndexItem) if err != nil { - return fmt.Errorf("can't remove list indexes: %w", err) + return fmt.Errorf("remove list indexes: %w", err) } err = updateFKBTIndexes(tx, obj, delFKBTIndexItem) if err != nil { - return fmt.Errorf("can't remove fake bucket tree indexes: %w", err) + return fmt.Errorf("remove fake bucket tree indexes: %w", err) } if isParent { @@ -351,7 +351,7 @@ func (db *DB) deleteObject( addrKey := addressKey(object.AddressOf(obj), key) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove from garbage bucket: %w", err) + return fmt.Errorf("remove from garbage bucket: %w", err) } } } @@ -363,12 +363,12 @@ func (db *DB) deleteObject( func parentLength(tx *bbolt.Tx, addr oid.Address) int { bucketName := make([]byte, bucketKeySize) - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:])) + bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) if bkt == nil { return 0 } - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:]))) + lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) if err != nil { return 0 } @@ -376,11 +376,12 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int { return len(lst) } -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) { +func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt := tx.Bucket(item.name) if bkt != nil { - _ = bkt.Delete(item.key) // ignore error, best effort there + return bkt.Delete(item.key) } + return nil } func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -405,19 +406,16 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { // if list empty, remove the key from bucket if len(lst) == 0 { - _ = bkt.Delete(item.key) // ignore error, best effort there - - return nil + return bkt.Delete(item.key) } // if list is not empty, then update it encodedLst, err := encodeList(lst) if err != nil { - return nil // ignore error, best effort there + return err } - _ = bkt.Put(item.key, 
encodedLst) // ignore error, best effort there - return nil + return bkt.Put(item.key, encodedLst) } func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -480,35 +478,47 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error return ErrUnknownObjectType } - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: bucketName, key: objKey, - }) + }); err != nil { + return err + } } else { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: parentBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } } - delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index name: smallBucketName(cnr, bucketName), key: objKey, - }) - delUniqueIndexItem(tx, namedBucketItem{ // remove from root index + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index name: rootBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } if expEpoch, ok := hasExpirationEpoch(obj); ok { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: expEpochToObjectBucketName, key: expirationEpochKey(expEpoch, cnr, addr.Object()), - }) - delUniqueIndexItem(tx, namedBucketItem{ + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), key: objKey, - }) + }); err != nil { + return err + } } return nil @@ -529,16 +539,18 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize)) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err) + return fmt.Errorf("remove EC parent from garbage bucket: %w", err) } } // also drop EC parent root info if current EC chunk is the last one if !hasAnyChunks { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }) + }); err != nil { + return err + } } if ech.ParentSplitParentID() == nil { @@ -567,16 +579,15 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize)) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err) + return fmt.Errorf("remove EC parent from garbage bucket: %w", err) } } // drop split info - delUniqueIndexItem(tx, namedBucketItem{ + return delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), }) - return nil } func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 0294dd3ba..7bd6f90a6 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "fmt" "time" @@ -20,8 +19,8 @@ import ( // ExistsPrm groups the parameters of Exists operation. 
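The metabase delete path above stops treating index cleanup as best effort: delUniqueIndexItem and delListIndexItem now return bucket errors, so a failed Delete or Put aborts the surrounding bbolt transaction instead of leaving a stale index entry behind. A condensed sketch of the propagating shape, assuming the go.etcd.io/bbolt module; the bucket and key names are made up:

package main

import (
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

// deleteIndexItem propagates the bucket error; inside db.Update this
// rolls the whole transaction back instead of ignoring the failure.
func deleteIndexItem(tx *bbolt.Tx, bucket, key []byte) error {
	bkt := tx.Bucket(bucket)
	if bkt == nil {
		return nil // bucket absent: nothing to clean up
	}
	return bkt.Delete(key)
}

func main() {
	db, err := bbolt.Open("example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bbolt.Tx) error {
		if _, err := tx.CreateBucketIfNotExists([]byte("index")); err != nil {
			return err
		}
		return deleteIndexItem(tx, []byte("index"), []byte("obj-1"))
	})
	fmt.Println("update result:", err)
}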
type ExistsPrm struct { - addr oid.Address - paddr oid.Address + addr oid.Address + ecParentAddr oid.Address } // ExistsRes groups the resulting values of Exists operation. @@ -37,9 +36,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) { p.addr = addr } -// SetParent is an Exists option to set objects parent. -func (p *ExistsPrm) SetParent(addr oid.Address) { - p.paddr = addr +// SetECParent is an Exists option to set the object's EC parent. +func (p *ExistsPrm) SetECParent(addr oid.Address) { + p.ecParentAddr = addr } // Exists returns the fact that the object is in the metabase. @@ -82,7 +81,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err currEpoch := db.epochState.CurrentEpoch() err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch) + res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch) return err }) @@ -90,10 +89,21 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err return res, metaerr.Wrap(err) } -func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) { +func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) { var locked bool - if !parent.Equals(oid.Address{}) { - locked = objectLocked(tx, parent.Container(), parent.Object()) + if !ecParent.Equals(oid.Address{}) { + st, err := objectStatus(tx, ecParent, currEpoch) + if err != nil { + return false, false, err + } + switch st { + case 2: + return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved)) + case 3: + return false, locked, ErrObjectIsExpired + } + + locked = objectLocked(tx, ecParent.Container(), ecParent.Object()) } // check graveyard and object expiration first st, err := objectStatus(tx, addr, currEpoch) @@ -143,12 +153,16 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpo // - 2 if object is covered with tombstone; // - 3 if object is expired. 
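exists above now also checks the EC parent's status before consulting its lock state, returning ObjectAlreadyRemoved for status 2 and ErrObjectIsExpired for status 3, following the objectStatus contract documented just above. A hedged sketch of that mapping; the meanings of 0 and 1 (available, garbage-marked) are assumptions here, and the plain error values stand in for the SDK's real status types:

package main

import (
	"errors"
	"fmt"
)

// Status codes as documented for objectStatus: 2 means the object is
// covered with a tombstone, 3 means it is expired. The 0 and 1 meanings
// below are assumptions made for this sketch.
const (
	statusAvailable uint8 = iota
	statusGCMarked
	statusTombstoned
	statusExpired
)

var (
	errAlreadyRemoved = errors.New("object already removed")
	errExpired        = errors.New("object is expired")
)

// statusError mirrors the switch added to exists: only tombstoned and
// expired statuses abort the lookup, everything else proceeds.
func statusError(st uint8) error {
	switch st {
	case statusTombstoned:
		return errAlreadyRemoved
	case statusExpired:
		return errExpired
	default:
		return nil
	}
}

func main() {
	for st := statusAvailable; st <= statusExpired; st++ {
		fmt.Printf("status %d -> %v\n", st, statusError(st))
	}
}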
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { + return objectStatusWithCache(nil, tx, addr, currEpoch) +} + +func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { // locked object could not be removed/marked with GC/expired - if objectLocked(tx, addr.Container(), addr.Object()) { + if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) { return 0, nil } - expired, err := isExpired(tx, addr, currEpoch) + expired, err := isExpiredWithCache(bc, tx, addr, currEpoch) if err != nil { return 0, err } @@ -157,8 +171,8 @@ func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, erro return 3, nil } - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + graveyardBkt := getGraveyardBucket(bc, tx) + garbageBkt := getGarbageBucket(bc, tx) addrKey := addressKey(addr, make([]byte, addressKeySize)) return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil } @@ -216,9 +230,9 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e splitInfo := objectSDK.NewSplitInfo() - err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo)) + err := splitInfo.Unmarshal(rawSplitInfo) if err != nil { - return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err) + return nil, fmt.Errorf("unmarshal split info from root index: %w", err) } return splitInfo, nil diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go index 68144d8b1..a1351cb6f 100644 --- a/pkg/local_object_storage/metabase/expired.go +++ b/pkg/local_object_storage/metabase/expired.go @@ -74,9 +74,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A } func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - bucketName := make([]byte, bucketKeySize) - bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName) - b := tx.Bucket(bucketName) + return isExpiredWithCache(nil, tx, addr, currEpoch) +} + +func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { + b := getExpiredBucket(bc, tx, addr.Container()) if b == nil { return false, nil } diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 1cbf78ab2..821810c09 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "fmt" "time" @@ -89,8 +88,12 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { } func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { + return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch) +} + +func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { if checkStatus { - st, err := objectStatus(tx, addr, currEpoch) + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) if err != nil { return nil, err } @@ -110,12 +113,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b bucketName := make([]byte, bucketKeySize) // check in primary index - data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(bytes.Clone(data)) + if b := getPrimaryBucket(bc, tx, 
cnr); b != nil { + if data := b.Get(key); len(data) != 0 { + return obj, obj.Unmarshal(data) + } } - data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) + data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) if len(data) != 0 { return nil, getECInfoError(tx, cnr, data) } @@ -123,13 +127,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b // if not found then check in tombstone index data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(bytes.Clone(data)) + return obj, obj.Unmarshal(data) } // if not found then check in locker index data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(bytes.Clone(data)) + return obj, obj.Unmarshal(data) } // if not found then check if object is a virtual @@ -185,9 +189,9 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD child := objectSDK.New() - err = child.Unmarshal(bytes.Clone(data)) + err = child.Unmarshal(data) if err != nil { - return nil, fmt.Errorf("can't unmarshal child with parent: %w", err) + return nil, fmt.Errorf("unmarshal child with parent: %w", err) } par := child.Parent() @@ -219,7 +223,7 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) if len(objData) != 0 { obj := objectSDK.New() - if err := obj.Unmarshal(bytes.Clone(objData)); err != nil { + if err := obj.Unmarshal(objData); err != nil { return err } chunk := objectSDK.ECChunk{} diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index c93d2c992..98c428410 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -219,7 +219,6 @@ func benchmarkGet(b *testing.B, numOfObj int) { meta.WithMaxBatchSize(batchSize), meta.WithMaxBatchDelay(10*time.Millisecond), ) - defer func() { require.NoError(b, db.Close(context.Background())) }() addrs := make([]oid.Address, 0, numOfObj) for range numOfObj { @@ -234,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { } db, addrs := prepareDb(runtime.NumCPU()) + defer func() { require.NoError(b, db.Close(context.Background())) }() b.Run("parallel", func(b *testing.B) { b.ReportAllocs() diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go index b0db952b2..2f23d424c 100644 --- a/pkg/local_object_storage/metabase/graveyard.go +++ b/pkg/local_object_storage/metabase/graveyard.go @@ -177,7 +177,7 @@ type gcHandler struct { func (g gcHandler) handleKV(k, _ []byte) error { o, err := garbageFromKV(k) if err != nil { - return fmt.Errorf("could not parse garbage object: %w", err) + return fmt.Errorf("parse garbage object: %w", err) } return g.h(o) @@ -190,7 +190,7 @@ type graveyardHandler struct { func (g graveyardHandler) handleKV(k, v []byte) error { o, err := graveFromKV(k, v) if err != nil { - return fmt.Errorf("could not parse grave: %w", err) + return fmt.Errorf("parse grave: %w", err) } return g.h(o) @@ -240,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address) func garbageFromKV(k []byte) (res GarbageObject, err error) { err = decodeAddressFromKey(&res.addr, k) if err != nil { - err = fmt.Errorf("could not parse address: %w", err) + err = fmt.Errorf("parse address: %w", err) } return diff --git 
a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 5ac0c0be5..76018fb61 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -342,10 +342,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I } func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { - if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil { + if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil { return err } - if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil { + if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil { return err } @@ -373,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck if data != nil { err := targetBucket.Delete(tombKey) if err != nil { - return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err) + return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err) } } diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 5d42e4125..9cccd7dad 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -1,10 +1,8 @@ package meta import ( - "bytes" "context" "errors" - "fmt" "strconv" "time" @@ -111,70 +109,6 @@ func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) return nil } -// IterateCoveredByTombstones iterates over all objects in DB which are covered -// by tombstone with string address from tss. Locked objects are not included -// (do not confuse with objects of type LOCK). -// -// If h returns ErrInterruptIterator, nil returns immediately. -// Returns other errors of h directly. -// -// Does not modify tss. 
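The inhume accounting above switches from updateShardObjectCounter(tx, typ, delta, false) to the purpose-named decShardObjectCounter introduced earlier in counter.go, removing a boolean flag that call sites had to decode. A tiny sketch of that refactor pattern, with illustrative names:

package main

import "fmt"

type counter struct{ value uint64 }

// update is the flag-style original: callers must remember what the
// trailing boolean means at every call site.
func (c *counter) update(delta uint64, inc bool) {
	if inc {
		c.value += delta
	} else {
		c.value -= delta
	}
}

// dec wraps update so call sites read as intent, not as a flag puzzle.
func (c *counter) dec(delta uint64) { c.update(delta, false) }

func main() {
	c := &counter{value: 10}
	c.dec(3)
	fmt.Println(c.value) // 7
}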
-func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - return db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateCoveredByTombstones(tx, tss, h) - }) -} - -func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error { - bktGraveyard := tx.Bucket(graveyardBucketName) - - err := bktGraveyard.ForEach(func(k, v []byte) error { - var addr oid.Address - if err := decodeAddressFromKey(&addr, v); err != nil { - return err - } - if _, ok := tss[addr.EncodeToString()]; ok { - var addr oid.Address - - err := decodeAddressFromKey(&addr, k) - if err != nil { - return fmt.Errorf("could not parse address of the object under tombstone: %w", err) - } - - if objectLocked(tx, addr.Container(), addr.Object()) { - return nil - } - - return h(addr) - } - - return nil - }) - - if errors.Is(err, ErrInterruptIterator) { - err = nil - } - - return err -} - func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error { var cid cid.ID var oid oid.ID @@ -195,7 +129,7 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) e } return b.ForEach(func(k, v []byte) error { - if oid.Decode(k) == nil && obj.Unmarshal(bytes.Clone(v)) == nil { + if oid.Decode(k) == nil && obj.Unmarshal(v) == nil { return f(cid, oid, obj) } diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 7eed32c55..4c9579965 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -67,65 +66,3 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt return object2.AddressOf(obj) } - -func TestDB_IterateCoveredByTombstones(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - - cnr := cidtest.ID() - ts := oidtest.Address() - protected1 := oidtest.Address() - protected2 := oidtest.Address() - protectedLocked := oidtest.Address() - garbage := oidtest.Address() - ts.SetContainer(cnr) - protected1.SetContainer(cnr) - protected2.SetContainer(cnr) - protectedLocked.SetContainer(cnr) - - var prm meta.InhumePrm - var err error - - prm.SetAddresses(protected1, protected2, protectedLocked) - prm.SetTombstoneAddress(ts) - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - - prm.SetAddresses(garbage) - prm.SetGCMark() - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - - var handled []oid.Address - - tss 
:= map[string]oid.Address{ - ts.EncodeToString(): ts, - } - - err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error { - handled = append(handled, addr) - return nil - }) - require.NoError(t, err) - - require.Len(t, handled, 3) - require.Contains(t, handled, protected1) - require.Contains(t, handled, protected2) - require.Contains(t, handled, protectedLocked) - - err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()}) - require.NoError(t, err) - - handled = handled[:0] - - err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error { - handled = append(handled, addr) - return nil - }) - require.NoError(t, err) - - require.Len(t, handled, 2) - require.NotContains(t, handled, protectedLocked) -} diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index a7ff2222f..2a0bd7f6a 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -87,7 +87,8 @@ type CountAliveObjectsInContainerPrm struct { } // ListWithCursor lists physical objects available in metabase starting from -// cursor. Includes objects of all types. Does not include inhumed objects. +// cursor. Includes objects of all types. Does not include inhumed and expired +// objects. // Use cursor value from response for consecutive requests. // // Returns ErrEndOfListing if there are no more objects to return or count @@ -138,11 +139,12 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, var containerID cid.ID var offset []byte - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + bc := newBucketCache() rawAddr := make([]byte, cidSize, addressKeySize) + currEpoch := db.epochState.CurrentEpoch() + loop: for ; name != nil; name, _ = c.Next() { cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name) @@ -166,8 +168,8 @@ loop: bkt := tx.Bucket(name) if bkt != nil { copy(rawAddr, cidRaw) - result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID, - result, count, cursor, threshold) + result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID, + result, count, cursor, threshold, currEpoch) if err != nil { return nil, nil, err } @@ -185,8 +187,7 @@ loop: if offset != nil { // new slice is much faster but less memory efficient // we need to copy, because offset exists during bbolt tx - cursor.inBucketOffset = make([]byte, len(offset)) - copy(cursor.inBucketOffset, offset) + cursor.inBucketOffset = bytes.Clone(offset) } if len(result) == 0 { @@ -195,29 +196,29 @@ loop: // new slice is much faster but less memory efficient // we need to copy, because bucketName exists during bbolt tx - cursor.bucketName = make([]byte, len(bucketName)) - copy(cursor.bucketName, bucketName) + cursor.bucketName = bytes.Clone(bucketName) return result, cursor, nil } // selectNFromBucket similar to selectAllFromBucket but uses cursor to find // object to start selecting from. Ignores inhumed objects. 
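The cursor bookkeeping above replaces manual make-and-copy with bytes.Clone for a reason worth spelling out: bbolt returns key and value slices that alias the memory-mapped database file and stay valid only for the life of the transaction, so anything stored in the listing cursor must be cloned first. A short demonstration, assuming the go.etcd.io/bbolt module:

package main

import (
	"bytes"
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("cursor.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var saved []byte
	err = db.Update(func(tx *bbolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("b"))
		if err != nil {
			return err
		}
		if err := bkt.Put([]byte("k"), []byte("position-1")); err != nil {
			return err
		}
		v := bkt.Get([]byte("k"))
		// v aliases bbolt's internal memory; keep a private copy for
		// any use after the transaction closes.
		saved = bytes.Clone(v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(saved))
}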
-func selectNFromBucket(bkt *bbolt.Bucket, // main bucket +func selectNFromBucket( + bc *bucketCache, + bkt *bbolt.Bucket, // main bucket objType objectSDK.Type, // type of the objects stored in the main bucket - graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets cidRaw []byte, // container ID prefix, optimization cnt cid.ID, // container ID to []objectcore.Info, // listing result limit int, // stop listing at `limit` items in result cursor *Cursor, // start from cursor object threshold bool, // ignore cursor and start immediately + currEpoch uint64, ) ([]objectcore.Info, []byte, *Cursor, error) { if cursor == nil { cursor = new(Cursor) } - count := len(to) c := bkt.Cursor() k, v := c.First() @@ -229,7 +230,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } for ; k != nil; k, v = c.Next() { - if count >= limit { + if len(to) >= limit { break } @@ -239,17 +240,25 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } offset = k + graveyardBkt := getGraveyardBucket(bc, bkt.Tx()) + garbageBkt := getGarbageBucket(bc, bkt.Tx()) if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { continue } + var o objectSDK.Object + if err := o.Unmarshal(v); err != nil { + return nil, nil, nil, err + } + + expEpoch, hasExpEpoch := hasExpirationEpoch(&o) + if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) { + continue + } + var isLinkingObj bool var ecInfo *objectcore.ECInfo if objType == objectSDK.TypeRegular { - var o objectSDK.Object - if err := o.Unmarshal(bytes.Clone(v)); err != nil { - return nil, nil, nil, err - } isLinkingObj = isLinkObject(&o) ecHeader := o.ECHeader() if ecHeader != nil { @@ -265,7 +274,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket a.SetContainer(cnt) a.SetObject(obj) to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}) - count++ } return to, offset, cursor, nil @@ -413,7 +421,7 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, p var ecInfo *objectcore.ECInfo if prm.ObjectType == objectSDK.TypeRegular { var o objectSDK.Object - if err := o.Unmarshal(bytes.Clone(v)); err != nil { + if err := o.Unmarshal(v); err != nil { return err } isLinkingObj = isLinkObject(&o) diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 6f6463071..02985991c 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -3,14 +3,17 @@ package meta_test import ( "context" "errors" + "strconv" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" @@ -18,6 +21,8 @@ import ( func BenchmarkListWithCursor(b *testing.B) { db := listWithCursorPrepareDB(b) + defer func() { require.NoError(b, db.Close(context.Background())) }() + b.Run("1 item", func(b *testing.B) { 
benchmarkListWithCursor(b, db, 1) }) @@ -33,7 +38,6 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{ NoSync: true, })) // faster single-thread generation - defer func() { require.NoError(b, db.Close(context.Background())) }() obj := testutil.GenerateObject() for i := range 100_000 { // should be a multiple of all batch sizes @@ -55,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { - if err != meta.ErrEndOfListing { + if !errors.Is(err, meta.ErrEndOfListing) { b.Fatalf("error: %v", err) } prm.SetCursor(nil) @@ -70,14 +74,16 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { func TestLisObjectsWithCursor(t *testing.T) { t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - const ( + currEpoch = 100 + expEpoch = currEpoch - 1 containers = 5 - total = containers * 4 // regular + ts + child + lock + total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired ) + db := newDB(t, meta.WithEpochState(epochState{currEpoch})) + defer func() { require.NoError(t, db.Close(context.Background())) }() + expected := make([]object.Info, 0, total) // fill metabase with objects @@ -126,6 +132,26 @@ func TestLisObjectsWithCursor(t *testing.T) { err = putBig(db, child) require.NoError(t, err) expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular}) + + // add expired object (do not include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + + // add non-expired object (include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) + + // add locked expired object (include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + objID := oidtest.ID() + obj.SetID(objID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID})) + expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) } t.Run("success with various count", func(t *testing.T) { diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index 6b78ef392..f4cb9e53b 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -4,8 +4,10 @@ import ( "bytes" "context" "fmt" + "slices" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -62,9 +64,7 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid. 
return ErrReadOnlyMode } - if len(locked) == 0 { - panic("empty locked list") - } + assert.False(len(locked) == 0, "empty locked list") err := db.lockInternal(locked, cnr, locker) success = err == nil @@ -162,7 +162,11 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { // checks if specified object is locked in the specified container. func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - bucketLocked := tx.Bucket(bucketNameLocked) + return objectLockedWithCache(nil, tx, idCnr, idObj) +} + +func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { + bucketLocked := getLockedBucket(bc, tx) if bucketLocked != nil { key := make([]byte, cidSize) idCnr.Encode(key) @@ -176,7 +180,7 @@ func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { } // return `LOCK` id's if specified object is locked in the specified container. -func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { +func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { var lockers []oid.ID bucketLocked := tx.Bucket(bucketNameLocked) if bucketLocked != nil { @@ -250,7 +254,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres unlockedObjects = append(unlockedObjects, addr) } else { // exclude locker - keyLockers = append(keyLockers[:i], keyLockers[i+1:]...) + keyLockers = slices.Delete(keyLockers, i, i+1) v, err = encodeList(keyLockers) if err != nil { @@ -351,20 +355,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e return res, err } -// GetLocked return `LOCK` id's if provided object is locked by any `LOCK`. Not found +// GetLocks returns `LOCK` IDs if provided object is locked by any `LOCK`. Not found // object is considered as non-locked. // // Returns only non-logical errors related to underlying database.
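Note that the GetLocked to GetLocks rename below also renames the metric and the tracing span, so dashboards keyed on the old method name need updating. A caller sketch, using only the signature visible in this hunk:

package main

import (
	"context"
	"fmt"

	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// printLockers reports the LOCK object IDs protecting addr; an unknown
// address simply comes back with no lockers rather than an error.
func printLockers(ctx context.Context, db *meta.DB, addr oid.Address) error {
	locks, err := db.GetLocks(ctx, addr)
	if err != nil {
		return fmt.Errorf("get locks: %w", err)
	}
	if len(locks) == 0 {
		fmt.Println(addr, "is not locked")
		return nil
	}
	for _, id := range locks {
		fmt.Println(addr, "locked by", id)
	}
	return nil
}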
-func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { +func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { var ( startedAt = time.Now() success = false ) defer func() { - db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success) + db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked", + _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks", trace.WithAttributes( attribute.String("address", addr.EncodeToString()), )) @@ -377,7 +381,7 @@ func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, er return res, ErrDegradedMode } err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res, err = getLocked(tx, addr.Container(), addr.Object()) + res, err = getLocks(tx, addr.Container(), addr.Object()) return nil })) success = err == nil diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go index ce6ae1004..7edb96384 100644 --- a/pkg/local_object_storage/metabase/mode.go +++ b/pkg/local_object_storage/metabase/mode.go @@ -19,7 +19,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { if !db.mode.NoMetabase() { if err := db.Close(ctx); err != nil { - return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) + return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } @@ -31,7 +31,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { err = db.Init(ctx) } if err != nil { - return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) + return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index d7675869f..5e1bbfe9e 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "encoding/binary" "errors" @@ -121,9 +120,15 @@ func (db *DB) put(tx *bbolt.Tx, return PutRes{}, errors.New("missing container in object") } + var ecParentAddress oid.Address + if ecHeader := obj.ECHeader(); ecHeader != nil { + ecParentAddress.SetContainer(cnr) + ecParentAddress.SetObject(ecHeader.Parent()) + } + isParent := si != nil - exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch) + exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch) var splitInfoError *objectSDK.SplitInfoError if errors.As(err, &splitInfoError) { @@ -174,18 +179,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o err := putUniqueIndexes(tx, obj, si, id) if err != nil { - return fmt.Errorf("can't put unique indexes: %w", err) + return fmt.Errorf("put unique indexes: %w", err) } err = updateListIndexes(tx, obj, putListIndexItem) if err != nil { - return fmt.Errorf("can't put list indexes: %w", err) + return fmt.Errorf("put list indexes: %w", err) } if indexAttributes { err = updateFKBTIndexes(tx, obj, putFKBTIndexItem) if err != nil { - return fmt.Errorf("can't put fake bucket tree indexes: %w", err) + return fmt.Errorf("put fake bucket tree indexes: %w", err) } } @@ -244,7 +249,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad } rawObject, err := obj.CutPayload().Marshal() if err != nil { - return fmt.Errorf("can't marshal object header: %w", 
err) + return fmt.Errorf("marshal object header: %w", err) } return putUniqueIndexItem(tx, namedBucketItem{ name: bucketName, @@ -314,7 +319,7 @@ func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName [] return si.Marshal() default: oldSI := objectSDK.NewSplitInfo() - if err := oldSI.Unmarshal(bytes.Clone(old)); err != nil { + if err := oldSI.Unmarshal(old); err != nil { return nil, err } si = util.MergeSplitInfo(si, oldSI) @@ -469,7 +474,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } data, err := update(bkt.Get(item.key), item.val) @@ -486,12 +491,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } fkbtRoot, err := createBucketLikelyExists(bkt, item.key) if err != nil { - return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err) + return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err) } return fkbtRoot.Put(item.val, zeroValue) @@ -500,19 +505,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } lst, err := decodeList(bkt.Get(item.key)) if err != nil { - return fmt.Errorf("can't decode leaf list %v: %w", item.key, err) + return fmt.Errorf("decode leaf list %v: %w", item.key, err) } lst = append(lst, item.val) encodedLst, err := encodeList(lst) if err != nil { - return fmt.Errorf("can't encode leaf list %v: %w", item.key, err) + return fmt.Errorf("encode leaf list %v: %w", item.key, err) } return bkt.Put(item.key, encodedLst) diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 45faecc13..5f0956f0b 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -37,7 +37,7 @@ func TestResetDropsContainerBuckets(t *testing.T) { for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) - putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) + putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx)) _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) } diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index f802036be..60da50671 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -131,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters res := make([]oid.Address, 0, len(mAddr)) + bc := newBucketCache() for a, ind := range mAddr { if ind != expLen { continue // ignore objects with unmatched fast filters @@ -145,7 +146,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters var addr oid.Address 
addr.SetContainer(cnr) addr.SetObject(id) - st, err := objectStatus(tx, addr, currEpoch) + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) if err != nil { return nil, err } @@ -153,7 +154,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters continue // ignore removed objects } - addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) + addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch) if !match { continue // ignore objects with unmatched slow filters } @@ -451,13 +452,13 @@ func (db *DB) selectObjectID( } // matchSlowFilters return true if object header is matched by all slow filters. -func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { +func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { result := addr if len(f) == 0 { return result, true } - obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch) + obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch) if err != nil { return result, false } @@ -515,9 +516,9 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc return result, true } -func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { +func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { buf := make([]byte, addressKeySize) - obj, err := db.get(tx, addr, buf, true, false, currEpoch) + obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch) if err != nil { var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { @@ -527,7 +528,7 @@ func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch continue } addr.SetObject(objID) - obj, err = db.get(tx, addr, buf, true, false, currEpoch) + obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch) if err == nil { return obj, true, nil } @@ -565,7 +566,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt case v2object.FilterHeaderContainerID: // support deprecated field err := res.cnr.DecodeString(filters[i].Value()) if err != nil { - return filterGroup{}, fmt.Errorf("can't parse container id: %w", err) + return filterGroup{}, fmt.Errorf("parse container id: %w", err) } res.withCnrFilter = true diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 5cc998311..ce2156d2e 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -1216,6 +1216,8 @@ func TestExpiredObjects(t *testing.T) { } func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) { + b.ReportAllocs() + var prm meta.SelectPrm prm.SetContainerID(cid) prm.SetFilters(fs) diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go index e58115bc8..72618b1a0 100644 --- a/pkg/local_object_storage/metabase/shard_id.go +++ b/pkg/local_object_storage/metabase/shard_id.go @@ -32,13 +32,13 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error } if err := db.openDB(ctx, mode); err != nil { - return nil, fmt.Errorf("failed to open metabase: %w", err) + return nil, fmt.Errorf("open 
metabase: %w", err) } id, err := db.readShardID() if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr)) + err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) } return id, metaerr.Wrap(err) @@ -70,7 +70,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err } if err := db.openDB(ctx, mode); err != nil { - return fmt.Errorf("failed to open metabase: %w", err) + return fmt.Errorf("open metabase: %w", err) } err := db.writeShardID(id) @@ -79,7 +79,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err } if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr)) + err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) } return metaerr.Wrap(err) diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go index 6d620b41a..8f2376503 100644 --- a/pkg/local_object_storage/metabase/storage_id.go +++ b/pkg/local_object_storage/metabase/storage_id.go @@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte { // StorageID returns storage descriptor for objects from the blobstor. // It is put together with the object can makes get/delete operation faster. -func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) { +func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) { var ( startedAt = time.Now() success = false @@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes db.modeMtx.RLock() defer db.modeMtx.RUnlock() + var res StorageIDRes if db.mode.NoMetabase() { return res, ErrDegradedMode } - err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.id, err = db.storageID(tx, prm.addr) - - return err + err := db.boltDB.View(func(tx *bbolt.Tx) error { + res.id = db.storageID(tx, prm.addr) + return nil }) success = err == nil return res, metaerr.Wrap(err) } -func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) { +func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte { key := make([]byte, bucketKeySize) smallBucket := tx.Bucket(smallBucketName(addr.Container(), key)) if smallBucket == nil { - return nil, nil + return nil } storageID := smallBucket.Get(objectKey(addr.Object(), key)) if storageID == nil { - return nil, nil + return nil } - return bytes.Clone(storageID), nil + return bytes.Clone(storageID) } // UpdateStorageIDPrm groups the parameters of UpdateStorageID operation. 
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index bcf72f440..4948f3424 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error { NoSync: true, }) if err != nil { - return fmt.Errorf("can't open new metabase to compact: %w", err) + return fmt.Errorf("open new metabase to compact: %w", err) } if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil { return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName))) @@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i } expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64) if err != nil { - return fmt.Errorf("could not parse expiration epoch: %w", err) + return fmt.Errorf("parse expiration epoch: %w", err) } expirationEpochBucket := b.Bucket(attrValue) attrKeyValueC := expirationEpochBucket.Cursor() @@ -360,7 +360,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv return nil } last = keys[len(keys)-1] - cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys) + cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys) if err != nil { log("deleting user attribute buckets completed with an error:", err) return err @@ -376,8 +376,8 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv } } -func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { - keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs) +func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { + keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs) if err != nil { return 0, fmt.Errorf("select non indexed user attributes: %w", err) } @@ -394,12 +394,12 @@ func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, return uint64(len(keysToDrop)), nil } -func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) { +func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) { var keysToDrop [][]byte for _, key := range keys { attr, ok := attributeFromAttributeBucket(key) if !ok { - return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) + return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) } if !IsAtrributeIndexed(attr) { keysToDrop = append(keysToDrop, key) @@ -407,9 +407,9 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([] } contID, ok := cidFromAttributeBucket(key) if !ok { - return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) + return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) } - info, err := cs.Info(contID) + info, err := cs.Info(ctx, contID) if err != nil { return nil, err } diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index 5444264be..c90de4dd6 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -45,7 +45,7 @@ func TestUpgradeV2ToV3(t *testing.T) { type testContainerInfoProvider struct{} 
-func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) { +func (p *testContainerInfoProvider) Info(ctx context.Context, id cid.ID) (container.Info, error) { return container.Info{}, nil } diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 0a2f91a47..4ad83332b 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -231,11 +232,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) { epoch := binary.BigEndian.Uint64(key) var cnr cid.ID if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err) + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err) } var obj oid.ID if err := obj.Decode(key[epochSize+cidSize:]); err != nil { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err) + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err) } return epoch, cnr, obj, nil } @@ -278,9 +279,7 @@ func objectKey(obj oid.ID, key []byte) []byte { // // firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - if len(objs) == 0 { - panic("empty object list in firstIrregularObjectType") - } + assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") var keys [2][1 + cidSize]byte diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go index 048bb9af6..fbc0f1ad9 100644 --- a/pkg/local_object_storage/metabase/version.go +++ b/pkg/local_object_storage/metabase/version.go @@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error { b, err := tx.CreateBucketIfNotExists(shardInfoBucket) if err != nil { - return fmt.Errorf("can't create auxiliary bucket: %w", err) + return fmt.Errorf("create auxiliary bucket: %w", err) } return b.Put(versionKey, data) } diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index c62d728b1..897b37ea0 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -106,7 +106,7 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error { } } if err != nil { - return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) + return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) } t.mode = m @@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { readOnly := m.ReadOnly() err := util.MkdirAllX(filepath.Dir(t.path), t.perm) if err != nil { - return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err)) + return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err)) } opts := *bbolt.DefaultOptions @@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { t.db, err = bbolt.Open(t.path, t.perm, &opts) if err != nil { - return metaerr.Wrap(fmt.Errorf("can't open the 
pilorama DB: %w", err)) + return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err)) } t.db.MaxBatchSize = t.maxBatchSize @@ -419,10 +419,7 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri return err } - i, node, err := t.getPathPrefix(bTree, attr, path) - if err != nil { - return err - } + i, node := t.getPathPrefix(bTree, attr, path) ts := t.getLatestTimestamp(bLog, d.Position, d.Size) lm = make([]Move, len(path)-i+1) @@ -980,10 +977,7 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st b := treeRoot.Bucket(dataBucket) - i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) - if err != nil { - return err - } + i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) if i < len(path)-1 { return nil } @@ -1083,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol } // TreeSortedByFilename implements the Forest interface. -func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) { +func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { var ( startedAt = time.Now() success = false @@ -1161,7 +1155,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = &s + last = NewCursor(s, res[len(res)-1].LastChild()) } return res, last, metaerr.Wrap(err) } @@ -1172,10 +1166,10 @@ func sortByFilename(nodes []NodeInfo) { }) } -func sortAndCut(result []NodeInfo, last *string) []NodeInfo { +func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo { var lastBytes []byte if last != nil { - lastBytes = []byte(*last) + lastBytes = []byte(last.GetFilename()) } sortByFilename(result) @@ -1240,7 +1234,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f nodes = nil length = actualLength + 1 count = 0 - c.Seek(append(prefix, byte(length), byte(length>>8))) + c.Seek(binary.LittleEndian.AppendUint16(prefix, length)) c.Prev() // c.Next() will be performed by for loop } } @@ -1360,7 +1354,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err return nil }) if err != nil { - return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err)) + return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err)) } success = true return ids, nil @@ -1504,7 +1498,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* var contID cidSDK.ID if err := contID.Decode(k[:32]); err != nil { - return fmt.Errorf("failed to decode containerID: %w", err) + return fmt.Errorf("decode container ID: %w", err) } res.Items = append(res.Items, ContainerIDTreeID{ CID: contID, @@ -1512,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* }) if len(res.Items) == batchSize { - res.NextPageToken = make([]byte, len(k)) - copy(res.NextPageToken, k) + res.NextPageToken = bytes.Clone(k) break } } @@ -1526,7 +1519,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* return &res, nil } -func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) { +func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, 
path []string) (int, []Node) { c := bTree.Cursor() var curNodes []Node @@ -1549,14 +1542,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin } if len(nextNodes) == 0 { - return i, curNodes, nil + return i, curNodes } } - return len(path), nextNodes, nil + return len(path), nextNodes } -func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) { +func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) { c := bTree.Cursor() var curNode Node @@ -1576,10 +1569,10 @@ loop: childKey, value = c.Next() } - return i, curNode, nil + return i, curNode } - return len(path), curNode, nil + return len(path), curNode } func (t *boltForest) moveFromBytes(m *Move, data []byte) error { @@ -1589,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error { func (t *boltForest) logFromBytes(lm *Move, data []byte) error { lm.Child = binary.LittleEndian.Uint64(data) lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.Meta.FromBytes(data[16:]) + return lm.FromBytes(data[16:]) } func (t *boltForest) logToBytes(lm *Move) []byte { w := io.NewBufBinWriter() - size := 8 + 8 + lm.Meta.Size() + 1 + size := 8 + 8 + lm.Size() + 1 // if lm.HasOld { // size += 8 + lm.Old.Meta.Size() // } @@ -1602,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte { w.Grow(size) w.WriteU64LE(lm.Child) w.WriteU64LE(lm.Parent) - lm.Meta.EncodeBinary(w.BinWriter) + lm.EncodeBinary(w.BinWriter) // w.WriteBool(lm.HasOld) // if lm.HasOld { // w.WriteU64LE(lm.Old.Parent) @@ -1664,7 +1657,7 @@ func internalKeyPrefix(key []byte, k string) []byte { key = append(key, 'i') l := len(k) - key = append(key, byte(l), byte(l>>8)) + key = binary.LittleEndian.AppendUint16(key, uint16(l)) key = append(key, k...) return key } @@ -1679,14 +1672,10 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte { key = internalKeyPrefix(key, k) l := len(v) - key = append(key, byte(l), byte(l>>8)) + key = binary.LittleEndian.AppendUint16(key, uint16(l)) key = append(key, v...) - var raw [8]byte - binary.LittleEndian.PutUint64(raw[:], parent) - key = append(key, raw[:]...) - - binary.LittleEndian.PutUint64(raw[:], node) - key = append(key, raw[:]...) + key = binary.LittleEndian.AppendUint64(key, parent) + key = binary.LittleEndian.AppendUint64(key, node) return key } diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index f31504e2b..ebfd0bcc0 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "sort" "strings" @@ -84,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID s.operations = append(s.operations, op) } - mCopy := make([]KeyValue, len(m)) - copy(mCopy, m) + mCopy := slices.Clone(m) op := s.do(&Move{ Parent: node, Meta: Meta{ @@ -164,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, } // TreeSortedByFilename implements the Forest interface. 
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) { +func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { @@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI var res []NodeInfo for _, nodeID := range nodeIDs { - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) for _, childID := range children { var found bool for _, kv := range s.infoMap[childID].Meta.Items { @@ -204,17 +204,14 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI r := mergeNodeInfos(res) for i := range r { - if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start { - finish := i + count - if len(res) < finish { - finish = len(res) - } + if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { + finish := min(len(res), i+count) last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], &last, nil + return r[i:finish], NewCursor(last, 0), nil } } last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, &last, nil + return nil, NewCursor(last, 0), nil } // TreeGetChildren implements the Forest interface. @@ -225,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str return nil, ErrTreeNotFound } - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) res := make([]NodeInfo, 0, len(children)) for _, childID := range children { res = append(res, NodeInfo{ diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index de56fc82b..844084c55 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) @@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go index 5a00bcf7a..b035be1e1 100644 --- a/pkg/local_object_storage/pilorama/heap.go +++ b/pkg/local_object_storage/pilorama/heap.go @@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any { // fixedHeap maintains a fixed number of smallest elements started at some point. 
type fixedHeap struct { - start *string + start *Cursor sorted bool count int h *filenameHeap } -func newHeap(start *string, count int) *fixedHeap { +func newHeap(start *Cursor, count int) *fixedHeap { h := new(filenameHeap) heap.Init(h) @@ -50,8 +50,19 @@ func newHeap(start *string, count int) *fixedHeap { const amortizationMultiplier = 5 func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil && filename <= *h.start { - return false + if h.start != nil { + if filename < h.start.GetFilename() { + return false + } else if filename == h.start.GetFilename() { + // A tree may have a lot of nodes with the same filename but different versions so that + // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call + // with the same filename. + pos := slices.Index(id, h.start.GetNode()) + if pos == -1 || pos+1 >= len(id) { + return false + } + id = id[pos+1:] + } } *h.h = append(*h.h, heapInfo{id: id, filename: filename}) diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index ce7b3db1e..28b7faec8 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree { // undo un-does op and changes s in-place. func (s *memoryTree) undo(op *move) { if op.HasOld { - s.tree.infoMap[op.Child] = op.Old + s.infoMap[op.Child] = op.Old } else { - delete(s.tree.infoMap, op.Child) + delete(s.infoMap, op.Child) } } @@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move { }, } - shouldPut := !s.tree.isAncestor(op.Child, op.Parent) - p, ok := s.tree.infoMap[op.Child] + shouldPut := !s.isAncestor(op.Child, op.Parent) + p, ok := s.infoMap[op.Child] if ok { lm.HasOld = true lm.Old = p @@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move { p.Meta = m p.Parent = op.Parent - s.tree.infoMap[op.Child] = p + s.infoMap[op.Child] = p return lm } @@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { } var nodes []Node - var lastTs Timestamp + var lastTS Timestamp children := t.getChildren(curNode) for i := range children { @@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { fileName := string(info.Meta.GetAttr(attr)) if fileName == path[len(path)-1] { if latest { - if info.Meta.Time >= lastTs { + if info.Meta.Time >= lastTS { nodes = append(nodes[:0], children[i]) } } else { diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 1f7e742a2..e1f6cd8e7 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -37,7 +37,7 @@ type Forest interface { TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. - TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) + TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) // TreeGetOpLog returns first log operation stored at or above the height. 
// In case no such operation is found, empty Move and nil error should be returned. TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) @@ -79,6 +79,38 @@ const ( AttributeVersion = "Version" ) +// Cursor keeps state between function calls for traversing nodes. +// It stores the attributes associated with a previous call, allowing subsequent operations +// to resume traversal from this point rather than starting from the beginning. +type Cursor struct { + // Last traversed filename. + filename string + + // Last traversed node. + node Node +} + +func NewCursor(filename string, node Node) *Cursor { + return &Cursor{ + filename: filename, + node: node, + } +} + +func (c *Cursor) GetFilename() string { + if c == nil { + return "" + } + return c.filename +} + +func (c *Cursor) GetNode() Node { + if c == nil { + return Node(0) + } + return c.node +} + // CIDDescriptor contains container ID and information about the node position // in the list of container nodes. type CIDDescriptor struct { diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go index 106ba6ae9..36d347f10 100644 --- a/pkg/local_object_storage/pilorama/multinode.go +++ b/pkg/local_object_storage/pilorama/multinode.go @@ -25,6 +25,10 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool { return true } +func (r *MultiNodeInfo) LastChild() Node { + return r.Children[len(r.Children)-1] +} + func (n NodeInfo) ToMultiNode() MultiNodeInfo { return MultiNodeInfo{ Children: MultiNode{n.ID}, diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go index 54c2b90a6..eecee1527 100644 --- a/pkg/local_object_storage/pilorama/split_test.go +++ b/pkg/local_object_storage/pilorama/split_test.go @@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) { require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4")) require.Equal(t, []byte{10}, testGetByPath(t, "value0")) - testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) { + testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) { res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize) require.NoError(t, err) return res, last diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index 364649b50..b4015ae8d 100644 --- a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 { return r.size } -func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { +func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { s.m.RLock() defer s.m.RUnlock() @@ -34,9 +34,15 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { return ContainerSizeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerSizeRes{}, err + } + defer release() + size, err := s.metaBase.ContainerSize(prm.cnr) if err != nil { - return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err) + return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) } return ContainerSizeRes{ @@ -69,9 +75,15 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont return ContainerCountRes{}, 
ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerCountRes{}, err + } + defer release() + counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) if err != nil { - return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err) + return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) } return ContainerCountRes{ @@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerSize(ctx, id) } @@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerCount(ctx, id) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 5a9e26155..d489b8b0d 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err err = s.SetMode(ctx, mode.DegradedReadOnly) if err != nil { - return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly)) + return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly) } return nil } @@ -72,7 +72,7 @@ func (s *Shard) Open(ctx context.Context) error { for j := i + 1; j < len(components); j++ { if err := components[j].Open(ctx, m); err != nil { // Other components must be opened, fail. - return fmt.Errorf("could not open %T: %w", components[j], err) + return fmt.Errorf("open %T: %w", components[j], err) } } err = s.handleMetabaseFailure(ctx, "open", err) @@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error { break } - return fmt.Errorf("could not open %T: %w", component, err) + return fmt.Errorf("open %T: %w", component, err) } } return nil @@ -108,19 +108,17 @@ func (s *Shard) Init(ctx context.Context) error { s.updateMetrics(ctx) s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - eventChan: make(chan Event), - mEventHandler: map[eventType]*eventHandlers{ - eventNewEpoch: { - cancelFunc: func() {}, - handlers: []eventHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, - }, + gcCfg: &s.gcCfg, + remover: s.removeGarbage, + stopChannel: make(chan struct{}), + newEpochChan: make(chan uint64), + newEpochHandlers: &newEpochHandlers{ + cancelFunc: func() {}, + handlers: []newEpochHandler{ + s.collectExpiredLocks, + s.collectExpiredObjects, + s.collectExpiredTombstones, + s.collectExpiredMetrics, }, }, } @@ -184,7 +182,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { break } - return fmt.Errorf("could not initialize %T: %w", component, err) + return fmt.Errorf("initialize %T: %w", component, err) } } return nil @@ -205,7 +203,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error { err := s.metaBase.Reset() if err != nil { - return fmt.Errorf("could not reset metabase: %w", err) + return fmt.Errorf("reset metabase: %w", err) } withCount := true @@ -216,8 +214,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - if s.cfg.refillMetabaseWorkersCount > 0 { - 
eg.SetLimit(s.cfg.refillMetabaseWorkersCount) + if s.refillMetabaseWorkersCount > 0 { + eg.SetLimit(s.refillMetabaseWorkersCount) } var completedCount uint64 @@ -254,12 +252,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error { err = errors.Join(egErr, itErr) if err != nil { - return fmt.Errorf("could not put objects to the meta: %w", err) + return fmt.Errorf("put objects to the meta: %w", err) } err = s.metaBase.SyncCounters() if err != nil { - return fmt.Errorf("could not sync object counters: %w", err) + return fmt.Errorf("sync object counters: %w", err) } success = true @@ -272,7 +270,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, if err := obj.Unmarshal(data); err != nil { s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject, zap.Stringer("address", addr), - zap.String("err", err.Error())) + zap.Error(err)) return nil } @@ -280,7 +278,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, var isIndexedContainer bool if hasIndexedAttribute { - info, err := s.containerInfo.Info(addr.Container()) + info, err := s.containerInfo.Info(ctx, addr.Container()) if err != nil { return err } @@ -318,7 +316,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error { var lock objectSDK.Lock if err := lock.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("could not unmarshal lock content: %w", err) + return fmt.Errorf("unmarshal lock content: %w", err) } locked := make([]oid.ID, lock.NumberOfMembers()) @@ -328,7 +326,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err id, _ := obj.ID() err := s.metaBase.Lock(ctx, cnr, id, locked) if err != nil { - return fmt.Errorf("could not lock objects: %w", err) + return fmt.Errorf("lock objects: %w", err) } return nil } @@ -337,7 +335,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object tombstone := objectSDK.NewTombstone() if err := tombstone.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("could not unmarshal tombstone content: %w", err) + return fmt.Errorf("unmarshal tombstone content: %w", err) } tombAddr := object.AddressOf(obj) @@ -358,13 +356,14 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object _, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { - return fmt.Errorf("could not inhume objects: %w", err) + return fmt.Errorf("inhume objects: %w", err) } return nil } // Close releases all Shard's components. func (s *Shard) Close(ctx context.Context) error { + unlock := s.lockExclusive() if s.rb != nil { s.rb.Stop(ctx, s.log) } @@ -390,6 +389,14 @@ func (s *Shard) Close(ctx context.Context) error { } } + if s.opsLimiter != nil { + s.opsLimiter.Close() + } + + unlock() + + // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock. + // So to prevent deadlock GC stopping is outside of exclusive lock. // If Init/Open was unsuccessful gc can be nil. 
if s.gc != nil { s.gc.stop(ctx) @@ -445,6 +452,10 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { return err } } + if c.opsLimiter != nil { + s.opsLimiter.Close() + s.opsLimiter = c.opsLimiter + } return s.setMode(ctx, c.info.Mode) } diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go index b3bc6a30b..8dc1f0522 100644 --- a/pkg/local_object_storage/shard/count.go +++ b/pkg/local_object_storage/shard/count.go @@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + cc, err := s.metaBase.ObjectCounters() if err != nil { return 0, err diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index f62cecd56..0101817a8 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -55,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del return DeleteRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return DeleteRes{}, err + } + defer release() + result := DeleteRes{} for _, addr := range prm.addr { select { @@ -112,8 +117,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil { s.log.Debug(ctx, logs.StorageIDRetrievalFailure, zap.Stringer("object", addr), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } storageID := res.StorageID() @@ -132,8 +136,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil && !client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, zap.Stringer("object_address", addr), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } return nil diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 784bf293a..2c11b6b01 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -18,7 +18,7 @@ type ExistsPrm struct { // Exists option to set object checked for existence. Address oid.Address // Exists option to set parent object checked for existence. - ParentAddress oid.Address + ECParentAddress oid.Address } // ExistsRes groups the resulting values of Exists operation. 
@@ -53,10 +53,6 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { )) defer span.End() - var exists bool - var locked bool - var err error - s.m.RLock() defer s.m.RUnlock() @@ -64,7 +60,18 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { return ExistsRes{}, ErrShardDisabled } else if s.info.EvacuationInProgress { return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } else if s.info.Mode.NoMetabase() { + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ExistsRes{}, err + } + defer release() + + var exists bool + var locked bool + + if s.info.Mode.NoMetabase() { var p common.ExistsPrm p.Address = prm.Address @@ -74,7 +81,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { } else { var existsPrm meta.ExistsPrm existsPrm.SetAddress(prm.Address) - existsPrm.SetParent(prm.ParentAddress) + existsPrm.SetECParent(prm.ECParentAddress) var res meta.ExistsRes res, err = s.metaBase.Exists(ctx, existsPrm) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index c212f8c36..a262a52cb 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -6,11 +6,13 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -31,41 +33,14 @@ type TombstoneSource interface { IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool } -// Event represents class of external events. -type Event interface { - typ() eventType -} +type newEpochHandler func(context.Context, uint64) -type eventType int - -const ( - _ eventType = iota - eventNewEpoch -) - -type newEpoch struct { - epoch uint64 -} - -func (e newEpoch) typ() eventType { - return eventNewEpoch -} - -// EventNewEpoch returns new epoch event. -func EventNewEpoch(e uint64) Event { - return newEpoch{ - epoch: e, - } -} - -type eventHandler func(context.Context, Event) - -type eventHandlers struct { +type newEpochHandlers struct { prevGroup sync.WaitGroup cancelFunc context.CancelFunc - handlers []eventHandler + handlers []newEpochHandler } type gcRunResult struct { @@ -107,10 +82,10 @@ type gc struct { remover func(context.Context) gcRunResult - // eventChan is used only for listening for the new epoch event. + // newEpochChan is used only for listening for the new epoch event. // It is ok to keep opened, we are listening for context done when writing in it. 
- eventChan chan Event - mEventHandler map[eventType]*eventHandlers + newEpochChan chan uint64 + newEpochHandlers *newEpochHandlers } type gcCfg struct { @@ -140,16 +115,8 @@ func defaultGCCfg() gcCfg { } func (gc *gc) init(ctx context.Context) { - sz := 0 - - for _, v := range gc.mEventHandler { - sz += len(v.handlers) - } - - if sz > 0 { - gc.workerPool = gc.workerPoolInit(sz) - } - + gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) gc.wg.Add(2) go gc.tickRemover(ctx) go gc.listenEvents(ctx) @@ -166,7 +133,7 @@ func (gc *gc) listenEvents(ctx context.Context) { case <-ctx.Done(): gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return - case event, ok := <-gc.eventChan: + case event, ok := <-gc.newEpochChan: if !ok { gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return @@ -177,38 +144,33 @@ func (gc *gc) listenEvents(ctx context.Context) { } } -func (gc *gc) handleEvent(ctx context.Context, event Event) { - v, ok := gc.mEventHandler[event.typ()] - if !ok { - return - } - - v.cancelFunc() - v.prevGroup.Wait() +func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() var runCtx context.Context - runCtx, v.cancelFunc = context.WithCancel(ctx) + runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) - v.prevGroup.Add(len(v.handlers)) + gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) - for i := range v.handlers { + for i := range gc.newEpochHandlers.handlers { select { case <-ctx.Done(): return default: } - h := v.handlers[i] + h := gc.newEpochHandlers.handlers[i] err := gc.workerPool.Submit(func() { - defer v.prevGroup.Done() - h(runCtx, event) + defer gc.newEpochHandlers.prevGroup.Done() + h(runCtx, epoch) }) if err != nil { gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, - zap.String("error", err.Error()), + zap.Error(err), ) - v.prevGroup.Done() + gc.newEpochHandlers.prevGroup.Done() } } } @@ -265,6 +227,9 @@ func (gc *gc) stop(ctx context.Context) { gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() + + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() } // iterates over metabase and deletes objects @@ -289,31 +254,10 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) - buf := make([]oid.Address, 0, s.rmBatchSize) - - var iterPrm meta.GarbageIterationPrm - iterPrm.SetHandler(func(g meta.GarbageObject) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - buf = append(buf, g.Address()) - - if len(buf) == s.rmBatchSize { - return meta.ErrInterruptIterator - } - - return nil - }) - - // iterate over metabase's objects with GC mark - // (no more than s.rmBatchSize objects) - err := s.metaBase.IterateOverGarbage(ctx, iterPrm) + buf, err := s.getGarbage(ctx) if err != nil { s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -334,7 +278,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { if err != nil { s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects, - zap.String("error", err.Error()), + zap.Error(err), ) result.success = false } @@ -342,13 +286,46 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return } +func (s *Shard) 
getGarbage(ctx context.Context) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + + buf := make([]oid.Address, 0, s.rmBatchSize) + + var iterPrm meta.GarbageIterationPrm + iterPrm.SetHandler(func(g meta.GarbageObject) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + buf = append(buf, g.Address()) + + if len(buf) == s.rmBatchSize { + return meta.ErrInterruptIterator + } + + return nil + }) + + if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil { + return nil, err + } + + return buf, nil +} + func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) + workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) + batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) return } -func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { +func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -356,8 +333,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -366,7 +343,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { batch = append(batch, o.Address()) @@ -396,7 +373,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err)) } } @@ -414,24 +391,25 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } + s.handleExpiredObjectsUnsafe(ctx, expired) +} + +func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { + select { + case <-ctx.Done(): + return + default: + } + expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) return } - var inhumePrm meta.InhumePrm - - inhumePrm.SetAddresses(expired...) 
- inhumePrm.SetGCMark() - - // inhume the collected objects - res, err := s.metaBase.Inhume(ctx, inhumePrm) + res, err := s.inhumeGC(ctx, expired) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err)) return } @@ -449,6 +427,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) } func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + result := make([]oid.Address, 0, len(source)) parentToChildren, err := s.metaBase.GetChildren(ctx, source) if err != nil { @@ -462,7 +446,20 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) return result, nil } -func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { +func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) { + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return meta.InhumeRes{}, err + } + defer release() + + var inhumePrm meta.InhumePrm + inhumePrm.SetAddresses(addrs...) + inhumePrm.SetGCMark() + return s.metaBase.Inhume(ctx, inhumePrm) +} + +func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -470,7 +467,6 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) }() - epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) @@ -503,11 +499,18 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { return } - err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + var release qos.ReleaseFunc + release, err = s.opsLimiter.ReadRequest(ctx) + if err != nil { + log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) + s.m.RUnlock() + return + } + err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + release() if err != nil { log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() - return } @@ -535,7 +538,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } -func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { +func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -543,8 +546,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -554,14 +557,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o 
*meta.ExpiredObject) { if o.Type() == objectSDK.TypeLock { batch = append(batch, o.Address()) if len(batch) == batchSize { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) batch = make([]oid.Address, 0, batchSize) @@ -575,7 +578,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { if len(batch) > 0 { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) } @@ -584,7 +587,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err)) } } @@ -596,7 +599,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo return ErrDegradedMode } - err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { select { case <-ctx.Done(): return meta.ErrInterruptIterator @@ -612,12 +621,11 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo } func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err } + defer release() return s.metaBase.FilterExpired(ctx, epoch, addresses) } @@ -634,12 +642,15 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston return } - res, err := s.metaBase.InhumeTombstones(ctx, tss) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.InhumeTombstones(ctx, tss) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) return } @@ -659,14 +670,22 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // HandleExpiredLocks unlocks all objects which were locked by lockers. // If successful, marks lockers themselves as garbage. func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return + } + + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } unlocked, err := s.metaBase.FreeLockedBy(lockers) + release() if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.String("error", err.Error()), - ) + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } @@ -674,13 +693,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] var pInhume meta.InhumePrm pInhume.SetAddresses(lockers...) 
pInhume.SetForceGCMark() - - res, err := s.metaBase.Inhume(ctx, pInhume) + release, err = s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.Inhume(ctx, pInhume) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) return } @@ -710,36 +731,40 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc return } - s.handleExpiredObjects(ctx, expiredUnlocked) + s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) } // HandleDeletedLocks unlocks all objects which were locked by lockers. func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } - _, err := s.metaBase.FreeLockedBy(lockers) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.String("error", err.Error()), - ) - + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + return + } + _, err = s.metaBase.FreeLockedBy(lockers) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } } -// NotificationChannel returns channel for shard events. -func (s *Shard) NotificationChannel() chan<- Event { - return s.gc.eventChan +// NotificationChannel returns channel for new epoch events. +func (s *Shard) NotificationChannel() chan<- uint64 { + return s.gc.newEpochChan } -func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { +func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") defer span.End() - epoch := e.(newEpoch).epoch - s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) @@ -748,7 +773,13 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { } func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } ids, err := s.metaBase.ZeroSizeContainers(ctx) + release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return @@ -760,7 +791,13 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui } func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } ids, err := s.metaBase.ZeroCountContainers(ctx) + release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 9998bbae2..54d2f1510 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -37,7 
+37,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index e3670b441..f512a488a 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { require.NoError(t, err) epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) var getPrm GetPrm getPrm.SetAddress(objectCore.AddressOf(obj)) @@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { require.True(t, errors.As(err, &splitInfoError), "split info must be provided") epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 7a31a705e..28f8912be 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -10,7 +10,6 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -112,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { return c.Get(ctx, prm.addr) } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return GetRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) @@ -155,14 +160,12 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta if client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Bool("skip_meta", skipMeta)) } else { s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Bool("skip_meta", skipMeta)) } } if skipMeta || mErr != nil { @@ -175,7 +178,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta mExRes, err := s.metaBase.StorageID(ctx, mPrm) if err != nil { - return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err) + return nil, true, fmt.Errorf("fetch blobovnicza id from 
metabase: %w", err) } storageID := mExRes.StorageID() diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index ff57e3bf9..34b8290d6 100644 --- a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -81,6 +81,12 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { headParams.SetAddress(prm.addr) headParams.SetRaw(prm.raw) + release, limitErr := s.opsLimiter.ReadRequest(ctx) + if limitErr != nil { + return HeadRes{}, limitErr + } + defer release() + var res meta.GetRes res, err = s.metaBase.Get(ctx, headParams) obj = res.Header() diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 6ccae3f53..7391adef2 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -36,7 +36,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { modeDegraded := s.GetMode().NoMetabase() if !modeDegraded { if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil { - err = fmt.Errorf("failed to read shard id from metabase: %w", err) + err = fmt.Errorf("read shard id from metabase: %w", err) } } @@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { } shardID := s.info.ID.String() - s.cfg.metricsWriter.SetShardID(shardID) + s.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } @@ -61,10 +61,11 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { if s.pilorama != nil { s.pilorama.SetParentID(s.info.ID.String()) } + s.opsLimiter.SetParentID(s.info.ID.String()) if len(idFromMetabase) == 0 && !modeDegraded { if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { - err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr)) + err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr)) } } return diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 984c54fbc..c0fd65f4b 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" @@ -82,6 +81,12 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return InhumeRes{}, err + } + defer release() + if s.hasWriteCache() { for i := range prm.target { _ = s.writeCache.Delete(ctx, prm.target[i]) @@ -110,8 +115,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { } s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), + zap.Error(err), ) s.m.RUnlock() diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 7b267d2e4..af87981ca 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -7,7 +7,6 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -107,9 +106,15 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + lst, err := s.metaBase.Containers(ctx) if err != nil { - return res, fmt.Errorf("can't list stored containers: %w", err) + return res, fmt.Errorf("list stored containers: %w", err) } filters := objectSDK.NewSearchFilters() @@ -124,8 +129,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { if err != nil { s.log.Debug(ctx, logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), - zap.String("error", err.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } @@ -147,9 +151,15 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo return ListContainersRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListContainersRes{}, err + } + defer release() + containers, err := s.metaBase.Containers(ctx) if err != nil { - return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err) + return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) } return ListContainersRes{ @@ -175,12 +185,18 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List return ListWithCursorRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListWithCursorRes{}, err + } + defer release() + var metaPrm meta.ListPrm metaPrm.SetCount(prm.count) metaPrm.SetCursor(prm.cursor) res, err := s.metaBase.ListWithCursor(ctx, metaPrm) if err != nil { - return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err) + return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err) } return ListWithCursorRes{ @@ -204,11 +220,17 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai return ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + var metaPrm meta.IterateOverContainersPrm metaPrm.Handler = prm.Handler - err := s.metaBase.IterateOverContainers(ctx, metaPrm) + err = s.metaBase.IterateOverContainers(ctx, metaPrm) if err != nil { - return fmt.Errorf("could not iterate over containers: %w", err) + return fmt.Errorf("iterate over containers: %w", err) } return nil @@ -229,13 +251,19 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv return ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + var metaPrm meta.IterateOverObjectsInContainerPrm metaPrm.ContainerID = prm.ContainerID metaPrm.ObjectType = prm.ObjectType metaPrm.Handler = prm.Handler - err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) + err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) if err != nil { - return fmt.Errorf("could not iterate over objects: %w", err) + return 
fmt.Errorf("iterate over objects: %w", err) } return nil @@ -253,12 +281,18 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + var metaPrm meta.CountAliveObjectsInContainerPrm metaPrm.ObjectType = prm.ObjectType metaPrm.ContainerID = prm.ContainerID count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm) if err != nil { - return 0, fmt.Errorf("could not count alive objects in bucket: %w", err) + return 0, fmt.Errorf("count alive objects in bucket: %w", err) } return count, nil diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index 4a8d89d63..9c392fdac 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked [] return ErrDegradedMode } - err := s.metaBase.Lock(ctx, idCnr, locker, locked) + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.Lock(ctx, idCnr, locker, locked) if err != nil { return fmt.Errorf("metabase lock: %w", err) } @@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() + var prm meta.IsLockedPrm prm.SetAddress(addr) @@ -72,10 +84,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return res.Locked(), nil } -// GetLocked return lock id's of the provided object. Not found object is +// GetLocks return lock id's of the provided object. Not found object is // considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise. 
-func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked", +func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), attribute.String("address", addr.EncodeToString()), @@ -86,5 +98,12 @@ func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, erro if m.NoMetabase() { return nil, ErrDegradedMode } - return s.metaBase.GetLocked(ctx, addr) + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + + return s.metaBase.GetLocks(ctx, addr) } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 5caf3641f..3878a65cd 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -28,9 +28,10 @@ func TestShard_Lock(t *testing.T) { var sh *Shard rootPath := t.TempDir() + l := logger.NewLoggerWrapper(zap.NewNop()) opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + WithLogger(l), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 50125a88d..f8cb00a31 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -67,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var res common.PutRes + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return PutRes{}, err + } + defer release() + // exist check are not performed there, these checks should be executed // ahead of `Put` by storage engine tryCache := s.hasWriteCache() && !m.NoMetabase() @@ -76,12 +82,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { if err != nil || !tryCache { if err != nil { s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, - zap.String("err", err.Error())) + zap.Error(err)) } res, err = s.blobStor.Put(ctx, putPrm) if err != nil { - return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err) + return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err) } } @@ -94,7 +100,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { if err != nil { // may we need to handle this case in a special way // since the object has been successfully written to BlobStor - return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err) + return PutRes{}, fmt.Errorf("put object to metabase: %w", err) } if res.Inserted { diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index 701268820..443689104 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -131,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { return obj, nil } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return RngRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 146e834cc..06fe9f511 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ 
b/pkg/local_object_storage/shard/range_test.go @@ -79,7 +79,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 10eb51a28..20f1f2b6f 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -6,10 +6,13 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -18,37 +21,9 @@ import ( var ErrRebuildInProgress = errors.New("shard rebuild in progress") -type RebuildWorkerLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -type rebuildLimiter struct { - semaphore chan struct{} -} - -func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter { - return &rebuildLimiter{ - semaphore: make(chan struct{}, workersCount), - } -} - -func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error { - select { - case l.semaphore <- struct{}{}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (l *rebuildLimiter) ReleaseWorkSlot() { - <-l.semaphore -} - type rebuildTask struct { - limiter RebuildWorkerLimiter - fillPercent int + concurrencyLimiter common.RebuildLimiter + fillPercent int } type rebuilder struct { @@ -88,14 +63,14 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D if !ok { continue } - runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter) + runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter) } } }() } func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger, - fillPercent int, limiter RebuildWorkerLimiter, + fillPercent int, concLimiter common.RebuildLimiter, ) { select { case <-ctx.Done(): @@ -103,21 +78,22 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo default: } log.Info(ctx, logs.BlobstoreRebuildStarted) - if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) + if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) } } -func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int, +func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int, ) 
error { select { case <-ctx.Done(): return ctx.Err() case r.tasks <- rebuildTask{ - limiter: limiter, - fillPercent: fillPercent, + concurrencyLimiter: limiter, + fillPercent: fillPercent, }: return nil default: @@ -166,7 +142,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres } type RebuildPrm struct { - ConcurrencyLimiter RebuildWorkerLimiter + ConcurrencyLimiter common.ConcurrencyLimiter TargetFillPercent uint32 } @@ -188,5 +164,30 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error { return ErrDegradedMode } - return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent)) + limiter := &rebuildLimiter{ + concurrencyLimiter: p.ConcurrencyLimiter, + rateLimiter: s.opsLimiter, + } + return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent)) +} + +var _ common.RebuildLimiter = (*rebuildLimiter)(nil) + +type rebuildLimiter struct { + concurrencyLimiter common.ConcurrencyLimiter + rateLimiter qos.Limiter +} + +func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + return r.concurrencyLimiter.AcquireWorkSlot(ctx) +} + +func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.ReadRequest(ctx) + return common.ReleaseFunc(release), err +} + +func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.WriteRequest(ctx) + return common.ReleaseFunc(release), err } diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index 184ca9b71..fbc751e26 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -60,6 +60,12 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + var selectPrm meta.SelectPrm selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) @@ -67,7 +73,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { mRes, err := s.metaBase.Select(ctx, selectPrm) if err != nil { - return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err) + return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err) } return SelectRes{ diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 1eb7f14d0..d89b56266 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -7,6 +7,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -98,6 +99,8 @@ type cfg struct { reportErrorFunc func(ctx context.Context, selfID string, message string, err error) containerInfo container.InfoProvider + + opsLimiter qos.Limiter } func defaultCfg() *cfg { @@ -109,6 +112,7 @@ func defaultCfg() *cfg { zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, metricsWriter: noopMetrics{}, + opsLimiter: qos.NewNoopLimiter(), } } @@ -201,7 +205,7 @@ func WithPiloramaOptions(opts 
...pilorama.Option) Option { func WithLogger(l *logger.Logger) Option { return func(c *cfg) { c.log = l - c.gcCfg.log = l + c.gcCfg.log = l.WithTag(logger.TagGC) } } @@ -214,7 +218,7 @@ func WithWriteCache(use bool) Option { // hasWriteCache returns bool if write cache exists on shards. func (s *Shard) hasWriteCache() bool { - return s.cfg.useWriteCache + return s.useWriteCache } // NeedRefillMetabase returns true if metabase is needed to be refilled. @@ -368,16 +372,22 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { } } -func (s *Shard) fillInfo() { - s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() - s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() - s.cfg.info.Mode = s.GetMode() +func WithLimiter(l qos.Limiter) Option { + return func(c *cfg) { + c.opsLimiter = l + } +} - if s.cfg.useWriteCache { - s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo() +func (s *Shard) fillInfo() { + s.info.MetaBaseInfo = s.metaBase.DumpInfo() + s.info.BlobStorInfo = s.blobStor.DumpInfo() + s.info.Mode = s.GetMode() + + if s.useWriteCache { + s.info.WriteCacheInfo = s.writeCache.DumpInfo() } if s.pilorama != nil { - s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo() + s.info.PiloramaInfo = s.pilorama.DumpInfo() } } @@ -444,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) { s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic) s.setContainerObjectsCount(contID.EncodeToString(), user, count.User) } - s.cfg.metricsWriter.SetMode(s.info.Mode) + s.metricsWriter.SetMode(s.info.Mode) } // incObjectCounter increment both physical and logical object // counters. func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - s.cfg.metricsWriter.IncObjectCounter(physical) - s.cfg.metricsWriter.IncObjectCounter(logical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + s.metricsWriter.IncObjectCounter(physical) + s.metricsWriter.IncObjectCounter(logical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) if isUser { - s.cfg.metricsWriter.IncObjectCounter(user) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) + s.metricsWriter.IncObjectCounter(user) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { if v > 0 { - s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) + s.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { if v > 0 { - s.cfg.metricsWriter.SetObjectCounter(typ, v) + s.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { for cnrID, count := range byCnr { if count.Phy > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) } if count.Logic > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) } if count.User > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) } } } func (s *Shard) addToContainerSize(cnr string, 
size int64) { if size != 0 { - s.cfg.metricsWriter.AddToContainerSize(cnr, size) + s.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { if size != 0 { - s.cfg.metricsWriter.AddToPayloadSize(size) + s.metricsWriter.AddToPayloadSize(size) } } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index f9ee34488..84be71c4d 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -60,7 +60,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index 01a014cec..db361a8bd 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeMove(ctx, d, treeID, m) } @@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } @@ -103,6 +113,11 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } @@ -130,6 +145,11 @@ func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) } @@ -157,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } @@ -182,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Meta{}, 0, err + } + defer release() return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } @@ -207,11 +237,16 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetChildren(ctx, 
cid, treeID, nodeID) } // TreeSortedByFilename implements the pilorama.Forest interface. -func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), @@ -231,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID if s.info.Mode.NoMetabase() { return nil, last, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, last, err + } + defer release() return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) } @@ -256,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return pilorama.Move{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Move{}, err + } + defer release() return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) } @@ -280,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeDrop(ctx, cid, treeID) } @@ -303,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeList(ctx, cid) } @@ -326,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u if s.pilorama == nil { return 0, ErrPiloramaDisabled } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeHeight(ctx, cid, treeID) } @@ -350,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b if s.info.Mode.NoMetabase() { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() return s.pilorama.TreeExists(ctx, cid, treeID) } @@ -378,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) } @@ -402,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st if s.info.Mode.NoMetabase() { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) } @@ -423,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeListTrees(ctx, prm) } @@ -452,5 
+532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source) } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index f655e477a..9edb89df8 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -67,6 +67,12 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal) } @@ -124,6 +130,13 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { close(started) defer cleanup() + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) + return + } + defer release() + s.log.Info(ctx, logs.StartedWritecacheSealAsync) if err := s.writeCache.Seal(ctx, prm); err != nil { s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) @@ -138,5 +151,11 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { return nil } } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Seal(ctx, prm) } diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index e829d013c..ee709ea73 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -6,6 +6,7 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -61,6 +62,7 @@ func New(opts ...Option) Cache { maxCacheSize: defaultMaxCacheSize, metrics: DefaultMetrics(), flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize, + qosLimiter: qos.NewNoopLimiter(), }, } @@ -94,7 +96,8 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error { if err != nil { return metaerr.Wrap(err) } - return metaerr.Wrap(c.initCounters()) + c.initCounters() + return nil } // Init runs necessary services. 
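The background loops touched by this patch (GC, blobstor rebuild, and the write-cache flush below) stamp their context with an IO tag before doing any work, so the limiter can classify each request by its origin. A sketch of how such context-based tagging can work, assuming the tag travels as a context value (the key type here is illustrative, not the frostfs-qos implementation):

```go
package main

import (
	"context"
	"fmt"
)

type ioTagKey struct{}

// ContextWithIOTag attaches an IO tag ("background", "writecache", ...) to ctx.
func ContextWithIOTag(ctx context.Context, tag string) context.Context {
	return context.WithValue(ctx, ioTagKey{}, tag)
}

// IOTagFromContext recovers the tag; a limiter can pick a per-tag queue from it.
func IOTagFromContext(ctx context.Context) (string, bool) {
	tag, ok := ctx.Value(ioTagKey{}).(string)
	return tag, ok
}

func main() {
	ctx := ContextWithIOTag(context.Background(), "background")
	if tag, ok := IOTagFromContext(ctx); ok {
		fmt.Println("scheduling request with tag:", tag)
	}
}
```

Tagging once at the top of the loop keeps every nested storage call classified without threading an extra parameter through each function.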
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index d9e34ceab..893d27ba2 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -14,6 +15,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" @@ -35,6 +37,7 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String()) fl := newFlushLimiter(c.flushSizeLimit) c.wg.Add(1) go func() { @@ -64,7 +67,13 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { continue } - err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { + release, err := c.qosLimiter.ReadRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err)) + c.modeMtx.RUnlock() + continue + } + err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { if err := fl.acquire(oi.DataSize); err != nil { return err } @@ -79,11 +88,15 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { return ctx.Err() } }) + release() if err != nil { c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) } c.modeMtx.RUnlock() + + // counter changed by fstree + c.estimateCacheSize() case <-ctx.Done(): return } @@ -107,6 +120,12 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) { func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) { defer fl.release(objInfo.size) + release, err := c.qosLimiter.WriteRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err)) + return + } + defer release() res, err := c.fsTree.Get(ctx, common.GetPrm{ Address: objInfo.addr, }) diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go index 9ec039f91..e369fbd50 100644 --- a/pkg/local_object_storage/writecache/iterate.go +++ b/pkg/local_object_storage/writecache/iterate.go @@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error { return b.ForEach(func(k, _ []byte) error { err := addr.DecodeString(string(k)) if err != nil { - return fmt.Errorf("could not parse object address: %w", err) + return fmt.Errorf("parse object address: %w", err) } return f(addr) diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go index ddc4101be..0e020b36e 100644 --- a/pkg/local_object_storage/writecache/limiter.go +++ b/pkg/local_object_storage/writecache/limiter.go @@ -3,6 +3,8 @@ package writecache import ( 
"errors" "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) var errLimiterClosed = errors.New("acquire failed: limiter closed") @@ -45,17 +47,11 @@ func (l *flushLimiter) release(size uint64) { l.cond.L.Lock() defer l.cond.L.Unlock() - if l.size >= size { - l.size -= size - } else { - panic("flushLimiter: invalid size") - } + assert.True(l.size >= size, "flushLimiter: invalid size") + l.size -= size - if l.count > 0 { - l.count-- - } else { - panic("flushLimiter: invalid count") - } + assert.True(l.count > 0, "flushLimiter: invalid count") + l.count-- l.cond.Broadcast() } diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index 73d12fd33..c491be60b 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -83,7 +83,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { } if !shrink { if err := c.fsTree.Close(ctx); err != nil { - return fmt.Errorf("can't close write-cache storage: %w", err) + return fmt.Errorf("close write-cache storage: %w", err) } return nil } @@ -98,16 +98,16 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { if errors.Is(err, errIterationCompleted) { empty = false } else { - return fmt.Errorf("failed to check write-cache items: %w", err) + return fmt.Errorf("check write-cache items: %w", err) } } if err := c.fsTree.Close(ctx); err != nil { - return fmt.Errorf("can't close write-cache storage: %w", err) + return fmt.Errorf("close write-cache storage: %w", err) } if empty { err := os.RemoveAll(c.path) if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to remove write-cache files: %w", err) + return fmt.Errorf("remove write-cache files: %w", err) } } else { c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty) diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index f2957fe98..a4f98ad06 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -3,8 +3,8 @@ package writecache import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Option represents write-cache configuration option. @@ -38,12 +38,14 @@ type options struct { disableBackgroundFlush bool // flushSizeLimit is total size of flushing objects. flushSizeLimit uint64 + // qosLimiter used to limit flush RPS. + qosLimiter qos.Limiter } // WithLogger sets logger. func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = log.With(zap.String("component", "WriteCache")) + o.log = log } } @@ -136,3 +138,9 @@ func WithFlushSizeLimit(v uint64) Option { o.flushSizeLimit = v } } + +func WithQoSLimiter(l qos.Limiter) Option { + return func(o *options) { + o.qosLimiter = l + } +} diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index 7da5c4d3a..2fbf50913 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -2,6 +2,7 @@ package writecache import ( "context" + "fmt" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -59,7 +60,15 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro // putBig writes object to FSTree and pushes it to the flush workers queue. 
func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error { - if !c.hasEnoughSpaceFS() { + if prm.RawData == nil { // foolproof: RawData should be marshalled by shard. + data, err := prm.Object.Marshal() + if err != nil { + return fmt.Errorf("cannot marshal object: %w", err) + } + prm.RawData = data + } + size := uint64(len(prm.RawData)) + if !c.hasEnoughSpace(size) { return ErrOutOfSpace } diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go index 835686fbb..7a52d3672 100644 --- a/pkg/local_object_storage/writecache/state.go +++ b/pkg/local_object_storage/writecache/state.go @@ -7,10 +7,6 @@ func (c *cache) estimateCacheSize() (uint64, uint64) { return count, size } -func (c *cache) hasEnoughSpaceFS() bool { - return c.hasEnoughSpace(c.maxObjectSize) -} - func (c *cache) hasEnoughSpace(objectSize uint64) bool { count, size := c.estimateCacheSize() if c.maxCacheCount > 0 && count+1 > c.maxCacheCount { @@ -19,7 +15,6 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool { return c.maxCacheSize >= size+objectSize } -func (c *cache) initCounters() error { +func (c *cache) initCounters() { c.estimateCacheSize() - return nil } diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go index a0e236cb7..e88566cdf 100644 --- a/pkg/local_object_storage/writecache/storage.go +++ b/pkg/local_object_storage/writecache/storage.go @@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error { fstree.WithFileCounter(c.counter), ) if err := c.fsTree.Open(mod); err != nil { - return fmt.Errorf("could not open FSTree: %w", err) + return fmt.Errorf("open FSTree: %w", err) } if err := c.fsTree.Init(); err != nil { - return fmt.Errorf("could not init FSTree: %w", err) + return fmt.Errorf("init FSTree: %w", err) } return nil diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go index 3a100f1a3..5eb341ba4 100644 --- a/pkg/local_object_storage/writecache/upgrade.go +++ b/pkg/local_object_storage/writecache/upgrade.go @@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error { return nil } if err != nil { - return fmt.Errorf("could not check write-cache database existence: %w", err) + return fmt.Errorf("check write-cache database existence: %w", err) } db, err := OpenDB(c.path, true, os.OpenFile) if err != nil { - return fmt.Errorf("could not open write-cache database: %w", err) + return fmt.Errorf("open write-cache database: %w", err) } defer func() { _ = db.Close() diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index 70b17eb8e..7ed511318 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -52,7 +52,7 @@ type Cache interface { // MainStorage is the interface of the underlying storage of Cache implementations. 
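The rewritten putBig above sizes the admission check on the actual encoded object rather than the worst-case maxObjectSize, marshalling in place when the shard did not pre-fill RawData. The flow, distilled; putPrm and blob here are illustrative stand-ins, not the real SDK types:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errOutOfSpace = errors.New("no space left in the write cache")

    type putPrm struct {
    	RawData []byte
    	Object  interface{ Marshal() ([]byte, error) }
    }

    type blob []byte

    func (b blob) Marshal() ([]byte, error) { return b, nil }

    type cache struct{ maxCacheSize, used uint64 }

    func (c *cache) hasEnoughSpace(objectSize uint64) bool {
    	return c.maxCacheSize >= c.used+objectSize
    }

    func (c *cache) putBig(prm putPrm) error {
    	if prm.RawData == nil { // fallback: normally the shard marshals the object
    		data, err := prm.Object.Marshal()
    		if err != nil {
    			return fmt.Errorf("cannot marshal object: %w", err)
    		}
    		prm.RawData = data
    	}
    	if !c.hasEnoughSpace(uint64(len(prm.RawData))) {
    		return errOutOfSpace
    	}
    	c.used += uint64(len(prm.RawData))
    	return nil
    }

    func main() {
    	c := &cache{maxCacheSize: 8}
    	fmt.Println(c.putBig(putPrm{Object: blob("tiny")}))      // <nil>
    	fmt.Println(c.putBig(putPrm{Object: blob("too large")})) // out of space
    }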
type MainStorage interface { - Compressor() *compression.Config + Compressor() *compression.Compressor Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error) Put(context.Context, common.PutPrm) (common.PutRes, error) } diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index aae245acd..4462daab4 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -1,36 +1,33 @@ package balance import ( + "context" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" ) // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. -func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { - h, err := address.StringToUint160(id.EncodeToString()) - if err != nil { - return nil, err - } +func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) { + h := id.ScriptHash() invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(balanceOfMethod) invokePrm.SetArgs(h) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err) } else if ln := len(prms); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln) } amount, err := client.BigIntFromStackItem(prms[0]) if err != nil { - return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err) + return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err) } return amount, nil } diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go index b05c526dc..1dacb9574 100644 --- a/pkg/morph/client/balance/client.go +++ b/pkg/morph/client/balance/client.go @@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) if err != nil { - return nil, fmt.Errorf("could not create static client of Balance contract: %w", err) + return nil, fmt.Errorf("create 'balance' contract client: %w", err) } return &Client{ @@ -54,15 +54,7 @@ type Option func(*opts) type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go index 39e4b28e5..57e61d62b 100644 --- a/pkg/morph/client/balance/decimals.go +++ b/pkg/morph/client/balance/decimals.go @@ -1,6 +1,7 @@ package balance import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,20 +9,20 @@ import ( // Decimals decimal precision of currency transactions // through the Balance contract call, and returns it. 
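BalanceOf above also drops a lossy detour: the old code rendered the user ID as a Neo address string and parsed it back, a conversion that could fail, while user.ID.ScriptHash() yields the util.Uint160 directly. The removed round-trip, reproduced with neo-go's address package alone:

    package main

    import (
    	"fmt"

    	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
    	"github.com/nspcc-dev/neo-go/pkg/util"
    )

    func main() {
    	// A script hash and its base58check Neo address form.
    	var h util.Uint160 // zero hash, for illustration
    	addr := address.Uint160ToString(h)

    	// The old BalanceOf/TransferX path: string -> Uint160, which can fail.
    	back, err := address.StringToUint160(addr)
    	fmt.Println(back.Equals(h), err) // true <nil>
    }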
-func (c *Client) Decimals() (uint32, error) { +func (c *Client) Decimals(ctx context.Context) (uint32, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(decimalsMethod) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err) + return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err) } else if ln := len(prms); ln != 1 { return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln) } decimals, err := client.IntFromStackItem(prms[0]) if err != nil { - return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err) + return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err) } return uint32(decimals), nil } diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 65a0b70a6..870bed166 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" ) // TransferPrm groups parameters of TransferX method. @@ -22,27 +21,18 @@ type TransferPrm struct { // TransferX transfers p.Amount of GASe-12 from p.From to p.To // with details p.Details through direct smart contract call. -// -// If TryNotary is provided, calls notary contract. func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { - from, err := address.StringToUint160(p.From.EncodeToString()) - if err != nil { - return err - } - - to, err := address.StringToUint160(p.To.EncodeToString()) - if err != nil { - return err - } + from := p.From.ScriptHash() + to := p.To.ScriptHash() prm := client.InvokePrm{} prm.SetMethod(transferXMethod) prm.SetArgs(from, to, p.Amount, p.Details) prm.InvokePrmOptional = p.InvokePrmOptional - _, err = c.client.Invoke(ctx, prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err) + return fmt.Errorf("invoke method (%s): %w", transferXMethod, err) } return nil } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index a0c29141b..aab058d27 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -9,6 +9,7 @@ import ( "sync/atomic" "time" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" @@ -60,6 +61,9 @@ type Client struct { rpcActor *actor.Actor // neo-go RPC actor gasToken *nep17.Token // neo-go GAS token wrapper rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper + nnsHash util.Uint160 // NNS contract hash + + nnsReader *nnsClient.ContractReader // NNS contract wrapper acc *wallet.Account // neo account accAddr util.Uint160 // account's address @@ -94,27 +98,12 @@ type Client struct { type cache struct { m sync.RWMutex - nnsHash *util.Uint160 gKey *keys.PublicKey txHeights *lru.Cache[util.Uint256, uint32] metrics metrics.MorphCacheMetrics } -func (c *cache) nns() *util.Uint160 { - c.m.RLock() - defer c.m.RUnlock() - - return c.nnsHash -} - -func (c *cache) setNNSHash(nnsHash util.Uint160) { - c.m.Lock() - defer c.m.Unlock() - - c.nnsHash = 
&nnsHash -} - func (c *cache) groupKey() *keys.PublicKey { c.m.RLock() defer c.m.RUnlock() @@ -133,7 +122,6 @@ func (c *cache) invalidate() { c.m.Lock() defer c.m.Unlock() - c.nnsHash = nil c.gKey = nil c.txHeights.Purge() } @@ -163,20 +151,6 @@ func (e *notHaltStateError) Error() string { ) } -// implementation of error interface for FrostFS-specific errors. -type frostfsError struct { - err error -} - -func (e frostfsError) Error() string { - return fmt.Sprintf("frostfs error: %v", e.err) -} - -// wraps FrostFS-specific error into frostfsError. Arg must not be nil. -func wrapFrostFSError(err error) error { - return frostfsError{err} -} - // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. @@ -196,7 +170,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...) if err != nil { - return InvokeRes{}, fmt.Errorf("could not invoke %s: %w", method, err) + return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err) } c.logger.Debug(ctx, logs.ClientNeoClientInvoke, @@ -210,10 +184,10 @@ // TestInvokeIterator invokes contract method returning an iterator and executes cb on each element. // If cb returns an error, the session is closed and this error is returned as-is. -// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. +// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created. // The default batchSize is 100, the default limit from neo-go. -func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error { +func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error { start := time.Now() success := false defer func() { @@ -240,7 +214,7 @@ if err != nil { return err } else if val.State != HaltState { - return wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException}) + return &notHaltStateError{state: val.State, exception: val.FaultException} } arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err) @@ -262,10 +236,7 @@ }() // Batch size for TraverseIterator() can be restricted on the server-side.
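Deleting wrapFrostFSError above is more than cleanup: frostfsError carried no Unwrap method, so errors.As could never see the notHaltStateError inside it, whereas returning the typed error directly restores matching. A compact demonstration:

    package main

    import (
    	"errors"
    	"fmt"
    )

    type notHaltStateError struct{ state string }

    func (e *notHaltStateError) Error() string { return "not HALT: " + e.state }

    // The removed wrapper: note there is no Unwrap, so errors.As
    // cannot see through it.
    type frostfsError struct{ err error }

    func (e frostfsError) Error() string { return fmt.Sprintf("frostfs error: %v", e.err) }

    func main() {
    	var target *notHaltStateError

    	wrapped := frostfsError{&notHaltStateError{state: "FAULT"}}
    	fmt.Println(errors.As(wrapped, &target)) // false: the chain is broken

    	direct := error(&notHaltStateError{state: "FAULT"})
    	fmt.Println(errors.As(direct, &target)) // true
    }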
- traverseBatchSize := batchSize - if invoker.DefaultIteratorResultItems < traverseBatchSize { - traverseBatchSize = invoker.DefaultIteratorResultItems - } + traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems) for { items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize) if err != nil { @@ -307,7 +278,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) ( } if val.State != HaltState { - return nil, wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException}) + return nil, &notHaltStateError{state: val.State, exception: val.FaultException} } success = true @@ -390,7 +361,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight, - zap.String("error", err.Error())) + zap.Error(err)) return nil } @@ -404,7 +375,7 @@ newHeight, err = c.rpcActor.GetBlockCount() if err != nil { c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243, - zap.String("error", err.Error())) + zap.Error(err)) return nil } @@ -499,7 +470,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) { // NeoFSAlphabetList returns keys that stored in NeoFS Alphabet role. Main chain // stores alphabet node keys of inner ring there, however the sidechain stores both // alphabet and non alphabet node keys of inner ring. -func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) { +func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -509,7 +480,7 @@ list, err := c.roleList(noderoles.NeoFSAlphabet) if err != nil { - return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err) + return nil, fmt.Errorf("get alphabet nodes role list: %w", err) } return list, nil @@ -523,7 +494,7 @@ func (c *Client) GetDesignateHash() util.Uint160 { func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) { height, err := c.rpcActor.GetBlockCount() if err != nil { - return nil, fmt.Errorf("can't get chain height: %w", err) + return nil, fmt.Errorf("get chain height: %w", err) } return c.rolemgmt.GetDesignatedByRole(r, height) @@ -594,6 +565,7 @@ func (c *Client) setActor(act *actor.Actor) { c.rpcActor = act c.gasToken = nep17.New(act, gas.Hash) c.rolemgmt = rolemgmt.New(act) + c.nnsReader = nnsClient.NewReader(act, c.nnsHash) } func (c *Client) GetActor() *actor.Actor { diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index d061747bb..e4dcd0db7 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -145,6 +145,11 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er if cli.client == nil { return nil, ErrNoHealthyEndpoint } + cs, err := cli.client.GetContractStateByID(nnsContractID) + if err != nil { + return nil, fmt.Errorf("resolve nns hash: %w", err) + } + cli.nnsHash = cs.Hash cli.setActor(act) go cli.closeWaiter(ctx) diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index bdbcce917..be684619b 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -46,9 +46,9 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts[i](o) } - sc, err := client.NewStatic(cli, contract, fee,
o.staticOpts...) + sc, err := client.NewStatic(cli, contract, fee, *o...) if err != nil { - return nil, fmt.Errorf("can't create container static client: %w", err) + return nil, fmt.Errorf("create 'container' contract client: %w", err) } return &Client{client: sc}, nil @@ -68,20 +68,10 @@ func (c Client) ContractAddress() util.Uint160 { // parameter of Wrapper. type Option func(*opts) -type opts struct { - staticOpts []client.StaticClientOption -} +type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - o.staticOpts = append(o.staticOpts, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX @@ -91,6 +81,6 @@ func TryNotary() Option { func AsAlphabet() Option { return func(o *opts) { - o.staticOpts = append(o.staticOpts, client.AsAlphabet()) + *o = append(*o, client.AsAlphabet()) } } diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index c4db0fe6e..60fb8ad7c 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -1,10 +1,9 @@ package container import ( + "context" "errors" - "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" @@ -15,28 +14,37 @@ import ( // to the specified user of FrostFS system. If idUser is nil, returns the list of all containers. // // If remote RPC does not support neo-go session API, fallback to List() method. -func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { - var rawID []byte +func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) { + var cidList []cid.ID + var err error + cb := func(id cid.ID) error { + cidList = append(cidList, id) + return nil + } + if err = c.IterateContainersOf(ctx, idUser, cb); err != nil { + return nil, err + } + return cidList, nil +} + +// IterateContainersOf iterates over a list of container identifiers +// belonging to the specified user of FrostFS system and executes +// `cb` on each element. If idUser is nil, calls it on the list of all containers.
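With TryNotary() removed as a public option here (and in the balance and netmap clients), notary invocation becomes the default: defaultOpts seeds the option slice with client.TryNotary(), and each remaining option simply appends. The slice-backed functional options pattern, in miniature with string stand-ins:

    package main

    import "fmt"

    type staticClientOption string

    type opts []staticClientOption

    // defaultOpts seeds the slice so notary tries are always on.
    func defaultOpts() *opts {
    	return &opts{"TryNotary"}
    }

    type Option func(*opts)

    // AsAlphabet appends to the shared slice instead of a wrapper struct.
    func AsAlphabet() Option {
    	return func(o *opts) { *o = append(*o, "AsAlphabet") }
    }

    func main() {
    	o := defaultOpts()
    	for _, opt := range []Option{AsAlphabet()} {
    		opt(o)
    	}
    	fmt.Println(*o) // [TryNotary AsAlphabet]
    }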
+func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error { + var rawID []byte if idUser != nil { rawID = idUser.WalletBytes() } - var cidList []cid.ID - cb := func(item stackitem.Item) error { - rawID, err := client.BytesFromStackItem(item) + itemCb := func(item stackitem.Item) error { + id, err := getCIDfromStackItem(item) if err != nil { - return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err) + return err } - - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return fmt.Errorf("decode container ID: %w", err) + if err = cb(id); err != nil { + return err } - - cidList = append(cidList, id) return nil } @@ -50,13 +58,10 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { const batchSize = 512 cnrHash := c.client.ContractAddress() - err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID) - if err != nil { - if errors.Is(err, unwrap.ErrNoSessionID) { - return c.list(idUser) - } - return nil, err + err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) + if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { + return c.iterate(ctx, idUser, cb) } - return cidList, nil + return err } diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go index 5696645b2..09912efa5 100644 --- a/pkg/morph/client/container/delete.go +++ b/pkg/morph/client/container/delete.go @@ -66,8 +66,6 @@ func (d *DeletePrm) SetKey(key []byte) { // // Returns valid until block and any error encountered that caused // the removal to interrupt. -// -// If TryNotary is provided, calls notary contract. func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { if len(p.signature) == 0 && !p.IsControl() { return 0, errNilArgument @@ -80,7 +78,7 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { res, err := c.client.Invoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err) + return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err) } return res.VUB, nil } diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go index dda6bf98c..90bcdd7d5 100644 --- a/pkg/morph/client/container/deletion_info.go +++ b/pkg/morph/client/container/deletion_info.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" "strings" @@ -14,39 +15,39 @@ import ( "github.com/mr-tron/base58" ) -func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) { - return DeletionInfo((*Client)(x), cnr) +func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) { + return DeletionInfo(ctx, (*Client)(x), cnr) } type deletionInfo interface { - DeletionInfo(cid []byte) (*containercore.DelInfo, error) + DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) } -func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { +func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.DeletionInfo(binCnr) + return c.DeletionInfo(ctx, binCnr) } -func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { +func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) { prm := client.TestInvokePrm{} 
prm.SetMethod(deletionInfoMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) } - return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln) } arr, err := client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err) } if len(arr) != 2 { @@ -55,17 +56,17 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { rawOwner, err := client.BytesFromStackItem(arr[0]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err) } var owner user.ID if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil { - return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err) } epoch, err := client.BigIntFromStackItem(arr[1]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err) } return &containercore.DelInfo{ diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index ea57a3a95..8622d2cdd 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" "strings" @@ -16,8 +17,8 @@ import ( type containerSource Client -func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) { - return Get((*Client)(x), cnr) +func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) { + return Get(ctx, (*Client)(x), cnr) } // AsContainerSource provides container Source interface @@ -27,15 +28,15 @@ func AsContainerSource(w *Client) containercore.Source { } type getContainer interface { - Get(cid []byte) (*containercore.Container, error) + Get(ctx context.Context, cid []byte) (*containercore.Container, error) } // Get marshals container ID, and passes it to Wrapper's Get method. -func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { +func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.Get(binCnr) + return c.Get(ctx, binCnr) } // Get reads the container from FrostFS system by binary identifier @@ -43,24 +44,24 @@ func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { // // If an empty slice is returned for the requested identifier, // storage.ErrNotFound error is returned. 
-func (c *Client) Get(cid []byte) (*containercore.Container, error) { +func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { prm := client.TestInvokePrm{} prm.SetMethod(getMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) } - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln) } arr, err := client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err) + return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err) } if len(arr) != 4 { @@ -69,29 +70,29 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { cnrBytes, err := client.BytesFromStackItem(arr[0]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err) } sigBytes, err := client.BytesFromStackItem(arr[1]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err) } pub, err := client.BytesFromStackItem(arr[2]) if err != nil { - return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err) } tokBytes, err := client.BytesFromStackItem(arr[3]) if err != nil { - return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err) } var cnr containercore.Container if err := cnr.Value.Unmarshal(cnrBytes); err != nil { // use other major version if there any - return nil, fmt.Errorf("can't unmarshal container: %w", err) + return nil, fmt.Errorf("unmarshal container: %w", err) } if len(tokBytes) > 0 { @@ -99,7 +100,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { err = cnr.Session.Unmarshal(tokBytes) if err != nil { - return nil, fmt.Errorf("could not unmarshal session token: %w", err) + return nil, fmt.Errorf("unmarshal session token: %w", err) } } diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index 6fed46c1a..fc63d1beb 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -1,20 +1,22 @@ package container import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" ) -// list returns a list of container identifiers belonging +// iterate iterates through a list of container identifiers belonging // to the specified user of FrostFS system. The list is composed // through Container contract call. // -// Returns the identifiers of all FrostFS containers if pointer +// Iterates through the identifiers of all FrostFS containers if pointer // to user identifier is nil. 
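ContainersOf is now just a collector over the callback-based iteration, and the callback also permits early termination by returning a sentinel error. The collector-over-iterator shape, reduced to a stub in place of the contract call:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errStop = errors.New("stop iteration")

    // iterate stands in for IterateContainersOf: it feeds ids to cb.
    func iterate(ids []string, cb func(string) error) error {
    	for _, id := range ids {
    		if err := cb(id); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    // collect mirrors the new ContainersOf: accumulate via the callback.
    func collect(ids []string) ([]string, error) {
    	var out []string
    	if err := iterate(ids, func(id string) error {
    		out = append(out, id)
    		return nil
    	}); err != nil {
    		return nil, err
    	}
    	return out, nil
    }

    func main() {
    	all, _ := collect([]string{"c1", "c2", "c3"})
    	fmt.Println(all)

    	// Early stop: take the first id only.
    	var first string
    	_ = iterate(all, func(id string) error { first = id; return errStop })
    	fmt.Println(first)
    }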
-func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { +func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error { var rawID []byte if idUser != nil { @@ -25,34 +27,43 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { prm.SetMethod(listMethod) prm.SetArgs(rawID) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err) + return fmt.Errorf("test invoke (%s): %w", listMethod, err) } else if ln := len(res); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) + return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) } res, err = client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err) + return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err) } - cidList := make([]cid.ID, 0, len(res)) for i := range res { - rawID, err := client.BytesFromStackItem(res[i]) + id, err := getCIDfromStackItem(res[i]) if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err) + return err } - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) + if err = cb(id); err != nil { + return err } - - cidList = append(cidList, id) } - return cidList, nil + return nil +} + +func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) { + rawID, err := client.BytesFromStackItem(item) + if err != nil { + return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err) + } + + var id cid.ID + + err = id.Decode(rawID) + if err != nil { + return cid.ID{}, fmt.Errorf("decode container ID: %w", err) + } + return id, nil } diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go index 74d9f6da8..3bb84eb87 100644 --- a/pkg/morph/client/container/put.go +++ b/pkg/morph/client/container/put.go @@ -94,8 +94,6 @@ func (p *PutPrm) SetZone(zone string) { // // Returns calculated container identifier and any error // encountered that caused the saving to interrupt. -// -// If TryNotary is provided, calls notary contract. func (c *Client) Put(ctx context.Context, p PutPrm) error { if len(p.sig) == 0 || len(p.key) == 0 { return errNilArgument @@ -119,7 +117,7 @@ func (c *Client) Put(ctx context.Context, p PutPrm) error { _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", method, err) + return fmt.Errorf("invoke method (%s): %w", method, err) } return nil } diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go index 571915c27..cd6a9849e 100644 --- a/pkg/morph/client/frostfs/client.go +++ b/pkg/morph/client/frostfs/client.go @@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) 
if err != nil { - return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err) + return nil, fmt.Errorf("create 'frostfs' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go index 4c31f42de..61eb03f09 100644 --- a/pkg/morph/client/frostfsid/client.go +++ b/pkg/morph/client/frostfsid/client.go @@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil) func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) { sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet()) if err != nil { - return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err) + return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go index 0852f536c..3a789672a 100644 --- a/pkg/morph/client/frostfsid/subject.go +++ b/pkg/morph/client/frostfsid/subject.go @@ -1,6 +1,7 @@ package frostfsid import ( + "context" "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" @@ -14,14 +15,14 @@ const ( methodGetSubjectExtended = "getSubjectExtended" ) -func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubject) prm.SetArgs(addr) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err) } structArr, err := checkStackItem(res) @@ -31,20 +32,20 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) subj, err := frostfsidclient.ParseSubject(structArr) if err != nil { - return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) } return subj, nil } -func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { +func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubjectExtended) prm.SetArgs(addr) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err) + return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err) } structArr, err := checkStackItem(res) @@ -54,7 +55,7 @@ func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.Subject subj, err := frostfsidclient.ParseSubjectExtended(structArr) if err != nil { - return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) } return subj, nil @@ -67,7 +68,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error structArr, err = client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err) + return nil, 
fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err) } return } diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index 708d3b39f..b9e39c25e 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -2,6 +2,7 @@ package client import ( "context" + "slices" "sort" "time" @@ -99,8 +100,7 @@ mainLoop: case <-t.C: c.switchLock.RLock() - endpointsCopy := make([]Endpoint, len(c.endpoints.list)) - copy(endpointsCopy, c.endpoints.list) + endpointsCopy := slices.Clone(c.endpoints.list) currPriority := c.endpoints.list[c.endpoints.curr].Priority highestPriority := c.endpoints.list[0].Priority diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go index eafa097e9..de8afbfb5 100644 --- a/pkg/morph/client/netmap/client.go +++ b/pkg/morph/client/netmap/client.go @@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) if err != nil { - return nil, fmt.Errorf("can't create netmap static client: %w", err) + return nil, fmt.Errorf("create 'netmap' contract client: %w", err) } return &Client{client: sc}, nil @@ -65,15 +65,7 @@ type Option func(*opts) type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 0a3c351db..3f6aed506 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -2,7 +2,6 @@ package netmap import ( "context" - "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -25,75 +24,45 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. -func (c *Client) MaxObjectSize() (uint64, error) { - objectSize, err := c.readUInt64Config(MaxObjectSizeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err) - } - - return objectSize, nil +func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, MaxObjectSizeConfig) } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. -func (c *Client) EpochDuration() (uint64, error) { - epochDuration, err := c.readUInt64Config(EpochDurationConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err) - } - - return epochDuration, nil +func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, EpochDurationConfig) } // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. -func (c *Client) ContainerFee() (uint64, error) { - fee, err := c.readUInt64Config(ContainerFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err) - } - - return fee, nil +func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, ContainerFeeConfig) } // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. 
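In multi.go above, slices.Clone (standard library since Go 1.21, well within the 1.23/1.24 toolchains this change targets) replaces the manual make-and-copy pair used to snapshot the endpoint list under the read lock. Equivalent behavior:

    package main

    import (
    	"fmt"
    	"slices"
    )

    type endpoint struct {
    	Address  string
    	Priority int
    }

    func main() {
    	list := []endpoint{{"wss://a", 1}, {"wss://b", 2}}

    	// Old: endpointsCopy := make([]endpoint, len(list)); copy(endpointsCopy, list)
    	endpointsCopy := slices.Clone(list)

    	// The clone is independent of the original slice.
    	endpointsCopy[0].Priority = 9
    	fmt.Println(list[0].Priority, endpointsCopy[0].Priority) // 1 9
    }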
-func (c *Client) ContainerAliasFee() (uint64, error) { - fee, err := c.readUInt64Config(ContainerAliasFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err) - } - - return fee, nil +func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, ContainerAliasFeeConfig) } // HomomorphicHashDisabled returns global configuration value of homomorphic hashing // settings. // // Returns (false, nil) if config key is not found in the contract. -func (c *Client) HomomorphicHashDisabled() (bool, error) { - return c.readBoolConfig(HomomorphicHashingDisabledKey) +func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { + return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey) } // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. -func (c *Client) InnerRingCandidateFee() (uint64, error) { - fee, err := c.readUInt64Config(IrCandidateFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err) - } - - return fee, nil +func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, IrCandidateFeeConfig) } // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. -func (c *Client) WithdrawFee() (uint64, error) { - fee, err := c.readUInt64Config(WithdrawFeeConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err) - } - - return fee, nil +func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { + return c.readUInt64Config(ctx, WithdrawFeeConfig) } // MaintenanceModeAllowed reads admission of "maintenance" state from the @@ -101,34 +70,32 @@ func (c *Client) WithdrawFee() (uint64, error) { // that storage nodes are allowed to switch their state to "maintenance". // // By default, maintenance state is disallowed. -func (c *Client) MaintenanceModeAllowed() (bool, error) { - return c.readBoolConfig(MaintenanceModeAllowedConfig) +func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { + return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig) } -func (c *Client) readUInt64Config(key string) (uint64, error) { - v, err := c.config([]byte(key), IntegerAssert) +func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { + v, err := c.config(ctx, []byte(key)) + if err != nil { + return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) + } + + bi, err := v.TryInteger() if err != nil { return 0, err } - - // IntegerAssert is guaranteed to return int64 if the error is nil. - return uint64(v.(int64)), nil + return bi.Uint64(), nil } // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. -func (c *Client) readBoolConfig(key string) (bool, error) { - v, err := c.config([]byte(key), BoolAssert) +func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { + v, err := c.config(ctx, []byte(key)) if err != nil { - if errors.Is(err, ErrConfigNotFound) { - return false, nil - } - - return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err) + return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } - // BoolAssert is guaranteed to return bool if the error is nil. 
- return v.(bool), nil + return v.TryBool() } // SetConfigPrm groups parameters of SetConfig operation. @@ -199,14 +166,14 @@ type NetworkConfiguration struct { } // ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain. -func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) { +func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) { var res NetworkConfiguration prm := client.TestInvokePrm{} prm.SetMethod(configListMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return res, fmt.Errorf("could not perform test invocation (%s): %w", + return res, fmt.Errorf("test invoke (%s): %w", configListMethod, err) } @@ -277,22 +244,18 @@ func bytesToBool(val []byte) bool { return false } -// ErrConfigNotFound is returned when the requested key was not found -// in the network config (returned value is `Null`). -var ErrConfigNotFound = errors.New("config value not found") - // config performs the test invoke of get config value // method of FrostFS Netmap contract. // -// Returns ErrConfigNotFound if config key is not found in the contract. +// A key that is not found in the contract is returned as stackitem.Null. -func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) { +func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", configMethod, err) } @@ -301,26 +264,7 @@ configMethod, ln) } - if _, ok := items[0].(stackitem.Null); ok { - return nil, ErrConfigNotFound - } - - return assert(items[0]) -} - -// IntegerAssert converts stack item to int64. -func IntegerAssert(item stackitem.Item) (any, error) { - return client.IntFromStackItem(item) -} - -// StringAssert converts stack item to string. -func StringAssert(item stackitem.Item) (any, error) { - return client.StringFromStackItem(item) -} - -// BoolAssert converts stack item to bool. -func BoolAssert(item stackitem.Item) (any, error) { - return client.BoolFromStackItem(item) + return items[0], nil } // iterateRecords iterates over all config records and passes them to f. diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go index 92d569ae2..8561329ec 100644 --- a/pkg/morph/client/netmap/epoch.go +++ b/pkg/morph/client/netmap/epoch.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,13 +9,13 @@ import ( // Epoch receives number of current FrostFS epoch // through the Netmap contract call.
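A behavioral note on the rewritten netmap config readers above: with the assert callbacks gone, config hands back the raw stack item and each caller converts it. A missing key arrives as Null, whose boolean conversion is false, which preserves the documented readBoolConfig default, while its integer conversion fails, so readUInt64Config surfaces an error instead of the old ErrConfigNotFound. Demonstrated with neo-go's stackitem package:

    package main

    import (
    	"fmt"

    	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
    )

    func main() {
    	// A present integer value, e.g. MaxObjectSize.
    	item := stackitem.Make(64 << 20)
    	bi, err := item.TryInteger()
    	fmt.Println(bi, err) // 67108864 <nil>

    	// A missing key comes back as Null: boolean reads default to false...
    	null := stackitem.Null{}
    	b, err := null.TryBool()
    	fmt.Println(b, err) // false <nil>

    	// ...while integer reads surface an error to the caller.
    	_, err = null.TryInteger()
    	fmt.Println(err != nil) // true
    }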
-func (c *Client) Epoch() (uint64, error) { +func (c *Client) Epoch(ctx context.Context) (uint64, error) { prm := client.TestInvokePrm{} prm.SetMethod(epochMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", + return 0, fmt.Errorf("test invoke (%s): %w", epochMethod, err) } @@ -25,20 +26,20 @@ func (c *Client) Epoch() (uint64, error) { num, err := client.IntFromStackItem(items[0]) if err != nil { - return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err) + return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err) } return uint64(num), nil } // LastEpochBlock receives block number of current FrostFS epoch // through the Netmap contract call. -func (c *Client) LastEpochBlock() (uint32, error) { +func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) { prm := client.TestInvokePrm{} prm.SetMethod(lastEpochBlockMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", + return 0, fmt.Errorf("test invoke (%s): %w", lastEpochBlockMethod, err) } @@ -49,7 +50,7 @@ func (c *Client) LastEpochBlock() (uint32, error) { block, err := client.IntFromStackItem(items[0]) if err != nil { - return 0, fmt.Errorf("could not get number from stack item (%s): %w", + return 0, fmt.Errorf("get number from stack item (%s): %w", lastEpochBlockMethod, err) } return uint32(block), nil diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index c9dc7d2fc..0e1f9186b 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -40,13 +40,13 @@ func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error { } // GetInnerRingList return current IR list. 
-func (c *Client) GetInnerRingList() (keys.PublicKeys, error) { +func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(innerRingListMethod) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err) } return irKeysFromStackItem(prms, innerRingListMethod) @@ -59,7 +59,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys irs, err := client.ArrayFromStackItem(stack[0]) if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err) + return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err) } irKeys := make(keys.PublicKeys, len(irs)) @@ -79,7 +79,7 @@ const irNodeFixedPrmNumber = 1 func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { prms, err := client.ArrayFromStackItem(prm) if err != nil { - return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err) + return nil, fmt.Errorf("get stack item array (IRNode): %w", err) } else if ln := len(prms); ln != irNodeFixedPrmNumber { return nil, fmt.Errorf( "unexpected stack item count (IRNode): expected %d, has %d", @@ -90,7 +90,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { byteKey, err := client.BytesFromStackItem(prms[0]) if err != nil { - return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err) + return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err) } return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256()) diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go index f7b5c3ba4..97782fc25 100644 --- a/pkg/morph/client/netmap/netmap.go +++ b/pkg/morph/client/netmap/netmap.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" @@ -11,14 +12,14 @@ import ( // GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and // decodes netmap.NetMap from the response. -func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(epochSnapshotMethod) invokePrm.SetArgs(epoch) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", epochSnapshotMethod, err) } @@ -34,13 +35,13 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { // GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo // from the response. 
-func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { +func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapCandidatesMethod) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err) } if len(res) > 0 { @@ -51,13 +52,13 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { } // NetMap calls "netmap" method and decode netmap.NetMap from the response. -func (c *Client) NetMap() (*netmap.NetMap, error) { +func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapMethod) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", netMapMethod, err) } diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go index efcdfd7b6..341b20935 100644 --- a/pkg/morph/client/netmap/new_epoch.go +++ b/pkg/morph/client/netmap/new_epoch.go @@ -16,7 +16,7 @@ func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error { _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) + return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) } return nil } @@ -34,7 +34,7 @@ func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) res, err := c.client.Invoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) + return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) } return res.VUB, nil } diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go index 949e8cb63..e83acde39 100644 --- a/pkg/morph/client/netmap/peer.go +++ b/pkg/morph/client/netmap/peer.go @@ -41,7 +41,7 @@ func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error { prm.InvokePrmOptional = p.InvokePrmOptional if _, err := c.client.Invoke(ctx, prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", method, err) + return fmt.Errorf("invoke method (%s): %w", method, err) } return nil } diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go index ba2c26af7..9dbec1a90 100644 --- a/pkg/morph/client/netmap/snapshot.go +++ b/pkg/morph/client/netmap/snapshot.go @@ -1,19 +1,22 @@ package netmap import ( + "context" + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) // GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response. 
-func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { prm := client.TestInvokePrm{} prm.SetMethod(snapshotMethod) prm.SetArgs(diff) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { - return nil, err + return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err) } return DecodeNetMap(res) diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index 218f7ad8e..bc00eb889 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -8,14 +8,12 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) const ( @@ -37,12 +35,8 @@ const ( NNSPolicyContractName = "policy.frostfs" ) -var ( - // ErrNNSRecordNotFound means that there is no such record in NNS contract. - ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") - - errEmptyResultStack = errors.New("returned result stack is empty") -) +// ErrNNSRecordNotFound means that there is no such record in NNS contract. +var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") // NNSAlphabetContractName returns contract name of the alphabet contract in NNS // based on alphabet index. @@ -61,97 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - nnsHash, err := c.NNSHash() - if err != nil { - return util.Uint160{}, err - } - - sh, err = nnsResolve(c.client, nnsHash, name) + sh, err = nnsResolve(c.nnsReader, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) } return sh, nil } -// NNSHash returns NNS contract hash. 
-func (c *Client) NNSHash() (util.Uint160, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint160{}, ErrConnectionLost - } - - success := false - startedAt := time.Now() - - defer func() { - c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt)) - }() - - nnsHash := c.cache.nns() - - if nnsHash == nil { - cs, err := c.client.GetContractStateByID(nnsContractID) - if err != nil { - return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err) - } - - c.cache.setNNSHash(cs.Hash) - nnsHash = &cs.Hash - } - success = true - return *nnsHash, nil -} - -func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { - found, err := exists(c, nnsHash, domain) +func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { + available, err := r.IsAvailable(domain) if err != nil { - return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err) + return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } - if !found { + if available { return nil, ErrNNSRecordNotFound } - result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - { - Type: smartcontract.IntegerType, - Value: big.NewInt(int64(nns.TXT)), - }, - }, nil) - if err != nil { - return nil, err - } - if result.State != vmstate.Halt.String() { - return nil, fmt.Errorf("invocation failed: %s", result.FaultException) - } - if len(result.Stack) == 0 { - return nil, errEmptyResultStack - } - return result.Stack[0], nil + return r.Resolve(domain, big.NewInt(int64(nns.TXT))) } -func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) { - res, err := nnsResolveItem(c, nnsHash, domain) +func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { + arr, err := nnsResolveItem(r, domain) if err != nil { return util.Uint160{}, err } - // Parse the result of resolving NNS record. - // It works with multiple formats (corresponding to multiple NNS versions). - // If array of hashes is provided, it returns only the first one. - if arr, ok := res.Value().([]stackitem.Item); ok { - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") - } - res = arr[0] + if len(arr) == 0 { + return util.Uint160{}, errors.New("NNS record is missing") } - bs, err := res.TryBytes() + bs, err := arr[0].TryBytes() if err != nil { return util.Uint160{}, fmt.Errorf("malformed response: %w", err) } @@ -171,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti return util.Uint160{}, errors.New("no valid hashes are found") } -func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) { - result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - }, nil) - if err != nil { - return false, err - } - - if len(result.Stack) == 0 { - return false, errEmptyResultStack - } - - res := result.Stack[0] - - available, err := res.TryBool() - if err != nil { - return false, fmt.Errorf("malformed response: %w", err) - } - - // not available means that it is taken - // and, therefore, exists - return !available, nil -} - // SetGroupSignerScope makes the default signer scope include all FrostFS contracts. // Should be called for side-chain client only. 
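The hand-rolled InvokeFunction/stack-inspection plumbing removed above is replaced by the generated NNS reader from frostfs-contract. A condensed sketch of the resulting resolution flow; the IsAvailable/Resolve calls are taken from the diff, while the final hash decoding is simplified (the production code tries several encodings):

import (
	"errors"
	"fmt"
	"math/big"

	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
	nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// resolveHash resolves the TXT record of a domain into a contract script
// hash. Note the inverted check: a domain that is still "available" has no
// record, which is the old exists() logic turned around.
func resolveHash(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
	available, err := r.IsAvailable(domain)
	if err != nil {
		return util.Uint160{}, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
	}
	if available {
		return util.Uint160{}, errors.New("record has not been found in NNS contract")
	}

	items, err := r.Resolve(domain, big.NewInt(int64(nns.TXT)))
	if err != nil {
		return util.Uint160{}, err
	}
	if len(items) == 0 {
		return util.Uint160{}, errors.New("NNS record is missing")
	}

	bs, err := items[0].TryBytes()
	if err != nil {
		return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
	}
	// Simplification: only the big-endian byte form is handled here.
	return util.Uint160DecodeBytesBE(bs)
}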
func (c *Client) SetGroupSignerScope() error { @@ -241,18 +147,12 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - nnsHash, err := c.NNSHash() + arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) if err != nil { return nil, err } - item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) - if err != nil { - return nil, err - } - - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) == 0 { + if len(arr) == 0 { return nil, errors.New("NNS record is missing") } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 71232cb33..448702613 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -38,8 +38,7 @@ type ( alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - notary util.Uint160 - proxy util.Uint160 + proxy util.Uint160 } notaryCfg struct { @@ -58,16 +57,11 @@ const ( defaultNotaryValidTime = 50 defaultNotaryRoundTime = 100 - notaryBalanceOfMethod = "balanceOf" - notaryExpirationOfMethod = "expirationOf" - setDesignateMethod = "designateAsRole" + setDesignateMethod = "designateAsRole" - notaryBalanceErrMsg = "can't fetch notary balance" notaryNotEnabledPanicMsg = "notary support was not enabled on this client" ) -var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack") - func defaultNotaryConfig(c *Client) *notaryCfg { return ¬aryCfg{ txValidTime: defaultNotaryValidTime, @@ -107,7 +101,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, alphabetSource: cfg.alphabetSource, - notary: notary.Hash, } c.notary = notaryCfg @@ -155,15 +148,16 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta bc, err := c.rpcActor.GetBlockCount() if err != nil { - return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err) + return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err) } - currentTill, err := c.depositExpirationOf() + r := notary.NewReader(c.rpcActor) + currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash()) if err != nil { - return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err) + return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err) } - till := max(int64(bc+delta), currentTill) + till := max(int64(bc+delta), int64(currentTill)) res, _, err := c.depositNotary(ctx, amount, till) return res, err } @@ -192,12 +186,12 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, - c.notary.notary, + notary.Hash, big.NewInt(int64(amount)), []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { if !errors.Is(err, neorpc.ErrAlreadyExists) { - return util.Uint256{}, 0, fmt.Errorf("can't make notary deposit: %w", err) + return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err) } // Transaction is already in mempool waiting to be processed. 
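Both notary reads in this file (the deposit expiration consulted by DepositNotary above, and the balance in GetNotaryDeposit in the next hunk) now go through neo-go's generated notary contract reader instead of raw TestInvoke calls with manual stack-item validation. A sketch of that read path; the function and variable names are illustrative:

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// notaryDepositState reads the notary balance of an account and the height
// until which its deposit is locked; the result-stack checks that used to be
// hand-written (item count, TryInteger) now live inside the reader.
func notaryDepositState(a *actor.Actor, acc util.Uint160) (int64, uint32, error) {
	r := notary.NewReader(a)

	balance, err := r.BalanceOf(acc)
	if err != nil {
		return 0, 0, fmt.Errorf("get notary balance: %w", err)
	}
	till, err := r.ExpirationOf(acc)
	if err != nil {
		return 0, 0, fmt.Errorf("get deposit expiration: %w", err)
	}
	return balance.Int64(), till, nil
}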
@@ -237,18 +231,10 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) { sh := c.acc.PrivateKey().PublicKey().GetScriptHash() - items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh) + r := notary.NewReader(c.rpcActor) + bigIntDeposit, err := r.BalanceOf(sh) if err != nil { - return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err) - } - - if len(items) != 1 { - return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems)) - } - - bigIntDeposit, err := items[0].TryInteger() - if err != nil { - return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)) + return 0, fmt.Errorf("get notary deposit: %w", err) } return bigIntDeposit.Int64(), nil @@ -289,7 +275,7 @@ func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) if err != nil { - return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err) + return fmt.Errorf("calculate nonce and `validUntilBlock` values: %w", err) } return c.notaryInvokeAsCommittee( @@ -338,7 +324,7 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) if err != nil { - return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err) + return fmt.Errorf("calculate nonce and `validUntilBlock` values: %w", err) } return c.notaryInvokeAsCommittee( @@ -407,7 +393,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { alphabetList, err := c.notary.alphabetSource() if err != nil { - return fmt.Errorf("could not fetch current alphabet keys: %w", err) + return fmt.Errorf("fetch current alphabet keys: %w", err) } cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList) @@ -475,7 +461,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { if r.State != vmstate.Halt.String() { - return wrapFrostFSError(&notHaltStateError{state: r.State, exception: r.FaultException}) + return &notHaltStateError{state: r.State, exception: r.FaultException} } t.ValidUntilBlock = until @@ -529,24 +515,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet if ok { pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256()) if err != nil { - return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err) + return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err) } acc = notary.FakeSimpleAccount(pub) } else { m, pubsBytes, ok := vm.ParseMultiSigContract(script) if !ok { - return nil, errors.New("failed to parse verification script of signer #2: unknown witness type") + return nil, errors.New("parse verification script of signer #2: unknown witness type") } pubs := make(keys.PublicKeys, len(pubsBytes)) for i := range pubs { pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256()) if err != nil { - return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err) + return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err) } } acc, err = notary.FakeMultisigAccount(m, pubs) if err != nil { - return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err) + return nil, fmt.Errorf("create fake account for 
signer #2: %w", err) } } } @@ -622,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) err := multisigAccount.ConvertMultisig(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) } } else { // alphabet multisig redeem script is @@ -631,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB // inner ring multiaddress witness multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) } } @@ -642,7 +626,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB func (c *Client) notaryTxValidationLimit() (uint32, error) { bc, err := c.rpcActor.GetBlockCount() if err != nil { - return 0, fmt.Errorf("can't get current blockchain height: %w", err) + return 0, fmt.Errorf("get current blockchain height: %w", err) } minTime := bc + c.notary.txValidTime @@ -651,24 +635,6 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) { return rounded, nil } -func (c *Client) depositExpirationOf() (int64, error) { - expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash()) - if err != nil { - return 0, fmt.Errorf("can't invoke method: %w", err) - } - - if len(expirationRes) != 1 { - return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes)) - } - - currentTillBig, err := expirationRes[0].TryInteger() - if err != nil { - return 0, fmt.Errorf("can't parse deposit till value: %w", err) - } - - return currentTillBig.Int64(), nil -} - // sigCount returns the number of required signature. // For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT). // If committee is true, returns M as N/2+1. 
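The comment above fully determines the arithmetic; restated as a sketch (not the exact helper that follows in the source):

// requiredSignatures: for N keys, the FrostFS Alphabet uses the dBFT-style
// threshold M = 2N/3 + 1, while committee decisions use majority M = N/2 + 1.
func requiredSignatures(n int, committee bool) int {
	if committee {
		return n/2 + 1
	}
	return n*2/3 + 1
}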
@@ -742,12 +708,12 @@ func alreadyOnChainError(err error) bool { func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) { notaryBalance, err := c.GetNotaryDeposit() if err != nil { - return 0, fmt.Errorf("could not get notary balance: %w", err) + return 0, fmt.Errorf("get notary balance: %w", err) } gasBalance, err := c.GasBalance() if err != nil { - return 0, fmt.Errorf("could not get GAS balance: %w", err) + return 0, fmt.Errorf("get GAS balance: %w", err) } if gasBalance == 0 { @@ -796,12 +762,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool) if hash != nil { height, err = c.getTransactionHeight(*hash) if err != nil { - return 0, 0, fmt.Errorf("could not get transaction height: %w", err) + return 0, 0, fmt.Errorf("get transaction height: %w", err) } } else { height, err = c.rpcActor.GetBlockCount() if err != nil { - return 0, 0, fmt.Errorf("could not get chain height: %w", err) + return 0, 0, fmt.Errorf("get chain height: %w", err) } } diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index be4c09182..c4eb120d2 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -159,7 +160,7 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash) } if err != nil { - return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err) + return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err) } vubP = &vub @@ -205,7 +206,9 @@ func (ti *TestInvokePrm) SetArgs(args ...any) { } // TestInvoke calls TestInvoke method of Client with static internal script hash. 
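Besides cancellation, the context parameter lets the hunk below open a tracing span per invoked contract method. The general pattern, using the tracing package that static.go now imports (the wrapper itself is illustrative):

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)

// tracedInvoke derives a span from the caller's context, names it after the
// invoked method, and always closes it on return.
func tracedInvoke(ctx context.Context, method string, call func(context.Context) error) error {
	ctx, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+method)
	defer span.End()

	return call(ctx)
}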
-func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) { +func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) { + _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method) + defer span.End() return s.client.TestInvoke( s.scScriptHash, prm.method, diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go index cd55d6bd2..f7b6705a8 100644 --- a/pkg/morph/client/util.go +++ b/pkg/morph/client/util.go @@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) { case stackitem.IntegerT: n, err := param.TryInteger() if err != nil { - return nil, fmt.Errorf("can't parse integer bytes: %w", err) + return nil, fmt.Errorf("parse integer bytes: %w", err) } return n.Bytes(), nil @@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) { func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error { return func(r *result.Invoke, t *transaction.Transaction) error { if r.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: r.FaultException} } t.SystemFee += add diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go index 962ec1bc2..87fcf84b8 100644 --- a/pkg/morph/client/waiter.go +++ b/pkg/morph/client/waiter.go @@ -33,13 +33,13 @@ func (w *waiterClient) GetVersion() (*result.Version, error) { // WaitTxHalt waits until transaction with the specified hash persists on the blockchain. // It also checks execution result to finish in HALT state. -func (c *Client) WaitTxHalt(ctx context.Context, p InvokeRes) error { +func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error { w, err := waiter.NewPollingBased(&waiterClient{c: c}) if err != nil { return fmt.Errorf("create tx waiter: %w", err) } - res, err := w.WaitAny(ctx, p.VUB, p.Hash) + res, err := w.WaitAny(ctx, vub, h) if err != nil { return fmt.Errorf("wait until tx persists: %w", err) } @@ -47,5 +47,5 @@ func (c *Client) WaitTxHalt(ctx context.Context, p InvokeRes) error { if res.VMState.HasFlag(vmstate.Halt) { return nil } - return wrapFrostFSError(¬HaltStateError{state: res.VMState.String(), exception: res.FaultException}) + return ¬HaltStateError{state: res.VMState.String(), exception: res.FaultException} } diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go index 062a2a886..99f80584a 100644 --- a/pkg/morph/event/balance/lock.go +++ b/pkg/morph/event/balance/lock.go @@ -3,7 +3,7 @@ package balance import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -47,61 +47,17 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash } // ParseLock from notification into lock structure. 
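WaitTxHalt, reworked in the waiter.go hunk above, now takes the valid-until-block and transaction hash as plain values rather than an InvokeRes, so waiting no longer depends on where the invocation result came from. A call-site sketch; the pairing of Client and StaticClient here is an assumption:

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
)

// invokeAndWait submits an invocation and blocks until the transaction
// persists in HALT state; VUB and Hash are fields of the invoke result.
func invokeAndWait(ctx context.Context, c *client.Client, s client.StaticClient, prm client.InvokePrm) error {
	res, err := s.Invoke(ctx, prm)
	if err != nil {
		return fmt.Errorf("invoke: %w", err)
	}
	return c.WaitTxHalt(ctx, res.VUB, res.Hash)
}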
func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Lock - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var le balance.LockEvent + if err := le.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse balance.LockEvent: %w", err) } - if ln := len(params); ln != 5 { - return nil, event.WrongNumberOfParameters(5, ln) - } - - // parse id - ev.id, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get lock id: %w", err) - } - - // parse user - user, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get lock user value: %w", err) - } - - ev.user, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err) - } - - // parse lock account - lock, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get lock account value: %w", err) - } - - ev.lock, err = util.Uint160DecodeBytesBE(lock) - if err != nil { - return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err) - } - - // parse amount - ev.amount, err = client.IntFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get lock amount: %w", err) - } - - // parse until deadline - ev.until, err = client.IntFromStackItem(params[4]) - if err != nil { - return nil, fmt.Errorf("could not get lock deadline: %w", err) - } - - ev.txHash = e.Container - - return ev, nil + return Lock{ + id: le.TxID, + user: le.From, + lock: le.To, + amount: le.Amount.Int64(), + until: le.Until.Int64(), + txHash: e.Container, + }, nil } diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go index 9199bcd55..87b91aede 100644 --- a/pkg/morph/event/balance/lock_test.go +++ b/pkg/morph/event/balance/lock_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -28,7 +27,7 @@ func TestParseLock(t *testing.T) { } _, err := ParseLock(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong id parameter", func(t *testing.T) { diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go index a206307f8..d28f6d521 100644 --- a/pkg/morph/event/container/delete.go +++ b/pkg/morph/event/container/delete.go @@ -3,7 +3,7 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -58,28 +58,14 @@ func (DeleteSuccess) MorphEvent() {} // ParseDeleteSuccess decodes notification event thrown by Container contract into // DeleteSuccess and returns it as event.Event. 
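On the test side, the wrong-arity assertion above relaxes to require.Error because the exact message now originates in the generated binding rather than in event.WrongNumberOfParameters. Well-formed fixtures still build the payload by hand; a fragment in the style of the existing tests (createNotifyEventFromItems is the helper those tests already use, and all values are illustrative):

// Lock notification payload, in contract order: id, from, to, amount, until.
user := util.Uint160{0x1, 0x2, 0x3}
lockAcc := util.Uint160{0x3, 0x2, 0x1}

_, err := ParseLock(createNotifyEventFromItems([]stackitem.Item{
	stackitem.NewByteArray([]byte("lock id")),
	stackitem.NewByteArray(user.BytesBE()),
	stackitem.NewByteArray(lockAcc.BytesBE()),
	stackitem.NewBigInteger(big.NewInt(100)),
	stackitem.NewBigInteger(big.NewInt(200)),
}))
require.NoError(t, err)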
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array from raw notification event: %w", err) + var dse container.DeleteSuccessEvent + if err := dse.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err) } - const expectedItemNumDeleteSuccess = 1 - - if ln := len(items); ln != expectedItemNumDeleteSuccess { - return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln) - } - - binID, err := client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("parse container ID item: %w", err) - } - - var res DeleteSuccess - - err = res.ID.Decode(binID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - return res, nil + var cnr cid.ID + cnr.SetSHA256(dse.ContainerID) + return DeleteSuccess{ + ID: cnr, + }, nil } diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go index 627c5fcf5..62e7d7277 100644 --- a/pkg/morph/event/container/delete_test.go +++ b/pkg/morph/event/container/delete_test.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -18,7 +17,7 @@ func TestParseDeleteSuccess(t *testing.T) { } _, err := ParseDeleteSuccess(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong container parameter", func(t *testing.T) { diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go index 335034bf3..b09394ba4 100644 --- a/pkg/morph/event/container/put.go +++ b/pkg/morph/event/container/put.go @@ -3,7 +3,7 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -78,33 +78,14 @@ func (PutSuccess) MorphEvent() {} // ParsePutSuccess decodes notification event thrown by Container contract into // PutSuccess and returns it as event.Event. 
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array from raw notification event: %w", err) + var pse container.PutSuccessEvent + if err := pse.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err) } - const expectedItemNumPutSuccess = 2 - - if ln := len(items); ln != expectedItemNumPutSuccess { - return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln) - } - - binID, err := client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("parse container ID item: %w", err) - } - - _, err = client.BytesFromStackItem(items[1]) - if err != nil { - return nil, fmt.Errorf("parse public key item: %w", err) - } - - var res PutSuccess - - err = res.ID.Decode(binID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - return res, nil + var cnr cid.ID + cnr.SetSHA256(pse.ContainerID) + return PutSuccess{ + ID: cnr, + }, nil } diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go index 3622f9943..dd5c7ea93 100644 --- a/pkg/morph/event/container/put_test.go +++ b/pkg/morph/event/container/put_test.go @@ -4,8 +4,8 @@ import ( "crypto/sha256" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) { } _, err := ParsePutSuccess(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong container ID parameter", func(t *testing.T) { @@ -35,18 +35,30 @@ func TestParsePutSuccess(t *testing.T) { id.Encode(binID) t.Run("wrong public key parameter", func(t *testing.T) { - _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - stackitem.NewMap(), - })) + t.Run("wrong type", func(t *testing.T) { + _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ + stackitem.NewByteArray(binID), + stackitem.NewMap(), + })) - require.Error(t, err) + require.Error(t, err) + }) + t.Run("garbage data", func(t *testing.T) { + _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ + stackitem.NewByteArray(binID), + stackitem.NewByteArray([]byte("key")), + })) + require.Error(t, err) + }) }) t.Run("correct behavior", func(t *testing.T) { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ stackitem.NewByteArray(binID), - stackitem.NewByteArray([]byte("key")), + stackitem.NewByteArray(pk.PublicKey().Bytes()), })) require.NoError(t, err) diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go index eae2a23f5..cf56464b8 100644 --- a/pkg/morph/event/frostfs/cheque.go +++ b/pkg/morph/event/frostfs/cheque.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -34,53 +34,20 @@ func (c Cheque) LockAccount() util.Uint160 { return 
c.LockValue } // ParseCheque from notification into cheque structure. func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Cheque - err error - ) + var ce frostfs.ChequeEvent + if err := ce.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err) + } - params, err := event.ParseStackArray(e) + lock, err := util.Uint160DecodeBytesBE(ce.LockAccount) if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err) } - if ln := len(params); ln != 4 { - return nil, event.WrongNumberOfParameters(4, ln) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get cheque id: %w", err) - } - - // parse user - user, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get cheque user: %w", err) - } - - ev.UserValue, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get cheque amount: %w", err) - } - - // parse lock account - lock, err := client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get cheque lock account: %w", err) - } - - ev.LockValue, err = util.Uint160DecodeBytesBE(lock) - if err != nil { - return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err) - } - - return ev, nil + return Cheque{ + IDValue: ce.Id, + AmountValue: ce.Amount.Int64(), + UserValue: ce.User, + LockValue: lock, + }, nil } diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go index ab177757f..d92b7922b 100644 --- a/pkg/morph/event/frostfs/cheque_test.go +++ b/pkg/morph/event/frostfs/cheque_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -27,7 +26,7 @@ func TestParseCheque(t *testing.T) { } _, err := ParseCheque(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong id parameter", func(t *testing.T) { diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go index 4c87634c2..805e80f3c 100644 --- a/pkg/morph/event/frostfs/config.go +++ b/pkg/morph/event/frostfs/config.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -36,39 +36,15 @@ func (u Config) Key() []byte { return u.KeyValue } func (u Config) Value() []byte { return u.ValueValue } func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Config - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var sce frostfs.SetConfigEvent + if err := sce.FromStackItem(e.Item); err != nil { + return nil, 
fmt.Errorf("parse frostfs.SetConfigEvent: %w", err) } - if ln := len(params); ln != 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get config update id: %w", err) - } - - // parse key - ev.KeyValue, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get config key: %w", err) - } - - // parse value - ev.ValueValue, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get config value: %w", err) - } - - ev.TxHashValue = e.Container - - return ev, nil + return Config{ + KeyValue: sce.Key, + ValueValue: sce.Value, + IDValue: sce.Id, + TxHashValue: e.Container, + }, nil } diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go index dcd4201e4..8acc8c15c 100644 --- a/pkg/morph/event/frostfs/config_test.go +++ b/pkg/morph/event/frostfs/config_test.go @@ -3,7 +3,6 @@ package frostfs import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -21,7 +20,7 @@ func TestParseConfig(t *testing.T) { } _, err := ParseConfig(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong first parameter", func(t *testing.T) { diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go index d8a3b82f0..fcb01577e 100644 --- a/pkg/morph/event/frostfs/deposit.go +++ b/pkg/morph/event/frostfs/deposit.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -34,50 +34,15 @@ func (d Deposit) Amount() int64 { return d.AmountValue } // ParseDeposit notification into deposit structure. 
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { - var ev Deposit - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var de frostfs.DepositEvent + if err := de.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err) } - if ln := len(params); ln != 4 { - return nil, event.WrongNumberOfParameters(4, ln) - } - - // parse from - from, err := client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get deposit sender: %w", err) - } - - ev.FromValue, err = util.Uint160DecodeBytesBE(from) - if err != nil { - return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get deposit amount: %w", err) - } - - // parse to - to, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get deposit receiver: %w", err) - } - - ev.ToValue, err = util.Uint160DecodeBytesBE(to) - if err != nil { - return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get deposit id: %w", err) - } - - return ev, nil + return Deposit{ + IDValue: de.TxHash[:], + AmountValue: de.Amount.Int64(), + FromValue: de.From, + ToValue: de.Receiver, + }, nil } diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go index f279a7f9c..38d3e61f6 100644 --- a/pkg/morph/event/frostfs/deposit_test.go +++ b/pkg/morph/event/frostfs/deposit_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -12,7 +11,7 @@ import ( func TestParseDeposit(t *testing.T) { var ( - id = []byte("Hello World") + id = util.Uint256{0, 1, 2, 3} from = util.Uint160{0x1, 0x2, 0x3} to = util.Uint160{0x3, 0x2, 0x1} @@ -26,7 +25,7 @@ func TestParseDeposit(t *testing.T) { } _, err := ParseDeposit(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong from parameter", func(t *testing.T) { @@ -72,12 +71,12 @@ func TestParseDeposit(t *testing.T) { stackitem.NewByteArray(from.BytesBE()), stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), stackitem.NewByteArray(to.BytesBE()), - stackitem.NewByteArray(id), + stackitem.NewByteArray(id[:]), })) require.NoError(t, err) require.Equal(t, Deposit{ - IDValue: id, + IDValue: id[:], AmountValue: amount, FromValue: from, ToValue: to, diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go deleted file mode 100644 index 62203540f..000000000 --- a/pkg/morph/event/frostfs/ir_update.go +++ /dev/null @@ -1,54 +0,0 @@ -package frostfs - -import ( - "crypto/elliptic" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -type UpdateInnerRing struct { - keys []*keys.PublicKey -} - -// MorphEvent implements Neo:Morph Event interface. 
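The fixture change in deposit_test.go above is forced by the binding: DepositEvent.TxHash is a util.Uint256, so the ID payload must decode as exactly 32 bytes. In sketch form:

// Before: any byte string satisfied the hand-written parser.
// After: the fixture is a real 32-byte hash, sliced wherever []byte is needed.
id := util.Uint256{0, 1, 2, 3}
_ = stackitem.NewByteArray(id[:]) // 32 bytes: decodes into TxHash
// stackitem.NewByteArray([]byte("Hello World")) // 11 bytes: FromStackItem now fails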
-func (UpdateInnerRing) MorphEvent() {} - -func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys } - -func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) { - var ( - ev UpdateInnerRing - err error - ) - - if ln := len(params); ln != 1 { - return nil, event.WrongNumberOfParameters(1, ln) - } - - // parse keys - irKeys, err := client.ArrayFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get updated inner ring keys: %w", err) - } - - ev.keys = make([]*keys.PublicKey, 0, len(irKeys)) - for i := range irKeys { - rawKey, err := client.BytesFromStackItem(irKeys[i]) - if err != nil { - return nil, fmt.Errorf("could not get updated inner ring public key: %w", err) - } - - key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err) - } - - ev.keys = append(ev.keys, key) - } - - return ev, nil -} diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go deleted file mode 100644 index fae87e5f9..000000000 --- a/pkg/morph/event/frostfs/ir_update_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package frostfs - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func genKey(t *testing.T) *keys.PrivateKey { - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - return priv -} - -func TestParseUpdateInnerRing(t *testing.T) { - publicKeys := []*keys.PublicKey{ - genKey(t).PublicKey(), - genKey(t).PublicKey(), - genKey(t).PublicKey(), - } - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseUpdateInnerRing(prms) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong first parameter", func(t *testing.T) { - _, err := ParseUpdateInnerRing([]stackitem.Item{ - stackitem.NewMap(), - }) - - require.Error(t, err) - }) - - t.Run("correct", func(t *testing.T) { - ev, err := ParseUpdateInnerRing([]stackitem.Item{ - stackitem.NewArray([]stackitem.Item{ - stackitem.NewByteArray(publicKeys[0].Bytes()), - stackitem.NewByteArray(publicKeys[1].Bytes()), - stackitem.NewByteArray(publicKeys[2].Bytes()), - }), - }) - require.NoError(t, err) - - require.Equal(t, UpdateInnerRing{ - keys: publicKeys, - }, ev) - }) -} diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go index f48067f86..2568b6512 100644 --- a/pkg/morph/event/frostfs/withdraw.go +++ b/pkg/morph/event/frostfs/withdraw.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -30,39 +30,14 @@ func (w Withdraw) Amount() int64 { return w.AmountValue } // ParseWithdraw notification into withdraw structure. 
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) { - var ev Withdraw - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var we frostfs.WithdrawEvent + if err := we.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err) } - if ln := len(params); ln != 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse user - user, err := client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw user: %w", err) - } - - ev.UserValue, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw amount: %w", err) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw id: %w", err) - } - - return ev, nil + return Withdraw{ + IDValue: we.TxHash[:], + AmountValue: we.Amount.Int64(), + UserValue: we.User, + }, nil } diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go index 33435d19a..e382305e6 100644 --- a/pkg/morph/event/frostfs/withdraw_test.go +++ b/pkg/morph/event/frostfs/withdraw_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -12,7 +11,7 @@ import ( func TestParseWithdraw(t *testing.T) { var ( - id = []byte("Hello World") + id = util.Uint256{1, 2, 3} user = util.Uint160{0x1, 0x2, 0x3} amount int64 = 10 @@ -25,7 +24,7 @@ func TestParseWithdraw(t *testing.T) { } _, err := ParseWithdraw(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong user parameter", func(t *testing.T) { @@ -59,12 +58,12 @@ func TestParseWithdraw(t *testing.T) { ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ stackitem.NewByteArray(user.BytesBE()), stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewByteArray(id), + stackitem.NewByteArray(id[:]), })) require.NoError(t, err) require.Equal(t, Withdraw{ - IDValue: id, + IDValue: id[:], AmountValue: amount, UserValue: user, }, ev) diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index 822335329..55a514ff1 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -4,6 +4,7 @@ import ( "context" "github.com/nspcc-dev/neo-go/pkg/core/block" + "github.com/nspcc-dev/neo-go/pkg/util" ) // Handler is an Event processing function. @@ -16,19 +17,10 @@ type BlockHandler func(context.Context, *block.Block) // the parameters of the handler of particular // contract event. type NotificationHandlerInfo struct { - scriptHashWithType - - h Handler -} - -// SetHandler is an event handler setter. -func (s *NotificationHandlerInfo) SetHandler(v Handler) { - s.h = v -} - -// Handler returns an event handler. 
-func (s NotificationHandlerInfo) Handler() Handler { - return s.h + Contract util.Uint160 + Type Type + Parser NotificationParser + Handlers []Handler } // NotaryHandlerInfo is a structure that groups diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 6e6184e77..e5cdfeef7 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -33,13 +33,6 @@ type Listener interface { // it could not be started. ListenWithError(context.Context, chan<- error) - // SetNotificationParser must set the parser of particular contract event. - // - // Parser of each event must be set once. All parsers must be set before Listen call. - // - // Must ignore nil parsers and all calls after listener has been started. - SetNotificationParser(NotificationParserInfo) - // RegisterNotificationHandler must register the event handler for particular notification event of contract. // // The specified handler must be called after each capture and parsing of the event. @@ -100,8 +93,6 @@ type listener struct { startOnce, stopOnce sync.Once - started bool - notificationParsers map[scriptHashWithType]NotificationParser notificationHandlers map[scriptHashWithType][]Handler @@ -120,7 +111,7 @@ type listener struct { pool *ants.Pool } -const newListenerFailMsg = "could not instantiate Listener" +const newListenerFailMsg = "instantiate Listener" var ( errNilLogger = errors.New("nil logger") @@ -143,11 +134,8 @@ func (l *listener) Listen(ctx context.Context) { l.startOnce.Do(func() { l.wg.Add(1) defer l.wg.Done() - if err := l.listen(ctx, nil); err != nil { - l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, - zap.String("error", err.Error()), - ) - } + + l.listen(ctx, nil) }) } @@ -161,26 +149,17 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.startOnce.Do(func() { l.wg.Add(1) defer l.wg.Done() - if err := l.listen(ctx, intError); err != nil { - l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, - zap.String("error", err.Error()), - ) - l.sendError(ctx, intError, err) - } + + l.listen(ctx, intError) }) } -func (l *listener) listen(ctx context.Context, intError chan<- error) error { - // mark listener as started - l.started = true - +func (l *listener) listen(ctx context.Context, intError chan<- error) { subErrCh := make(chan error) go l.subscribe(subErrCh) l.listenLoop(ctx, intError, subErrCh) - - return nil } func (l *listener) subscribe(errCh chan error) { @@ -192,7 +171,7 @@ func (l *listener) subscribe(errCh chan error) { // fill the list with the contracts with set event parsers. l.mtx.RLock() for hashType := range l.notificationParsers { - scHash := hashType.ScriptHash() + scHash := hashType.Hash // prevent repetitions for _, hash := range hashes { @@ -201,26 +180,26 @@ func (l *listener) subscribe(errCh chan error) { } } - hashes = append(hashes, hashType.ScriptHash()) + hashes = append(hashes, hashType.Hash) } l.mtx.RUnlock() err := l.subscriber.SubscribeForNotification(hashes...) 
if err != nil { - errCh <- fmt.Errorf("could not subscribe for notifications: %w", err) + errCh <- fmt.Errorf("subscribe for notifications: %w", err) return } if len(l.blockHandlers) > 0 { if err = l.subscriber.BlockNotifications(); err != nil { - errCh <- fmt.Errorf("could not subscribe for blocks: %w", err) + errCh <- fmt.Errorf("subscribe for blocks: %w", err) return } } if l.listenNotary { if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil { - errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err) + errCh <- fmt.Errorf("subscribe for notary requests: %w", err) return } } @@ -338,9 +317,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent * ) // get the event parser - keyEvent := scriptHashWithType{} - keyEvent.SetScriptHash(notifyEvent.ScriptHash) - keyEvent.SetType(typEvent) + keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent} l.mtx.RLock() parser, ok := l.notificationParsers[keyEvent] @@ -356,7 +333,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent * event, err := parser(notifyEvent) if err != nil { log.Warn(ctx, logs.EventCouldNotParseNotificationEvent, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -389,13 +366,13 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe case errors.Is(err, ErrTXAlreadyHandled): case errors.As(err, &expErr): l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent, - zap.String("error", err.Error()), + zap.Error(err), zap.Uint32("current_block_height", expErr.CurrentBlockHeight), zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight), ) default: l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent, - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -427,7 +404,7 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe event, err := parser(notaryEvent) if err != nil { log.Warn(ctx, logs.EventCouldNotParseNotaryEvent, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -449,72 +426,27 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe handler(ctx, event) } -// SetNotificationParser sets the parser of particular contract event. -// -// Ignores nil and already set parsers. -// Ignores the parser if listener is started. -func (l *listener) SetNotificationParser(pi NotificationParserInfo) { - log := l.log.With( - zap.String("contract", pi.ScriptHash().StringLE()), - zap.Stringer("event_type", pi.getType()), - ) - - parser := pi.parser() - if parser == nil { - log.Info(context.Background(), logs.EventIgnoreNilEventParser) - return - } - - l.mtx.Lock() - defer l.mtx.Unlock() - - // check if the listener was started - if l.started { - log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreParser) - return - } - - // add event parser - if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok { - l.notificationParsers[pi.scriptHashWithType] = pi.parser() - } - - log.Debug(context.Background(), logs.EventRegisteredNewEventParser) -} - // RegisterNotificationHandler registers the handler for particular notification event of contract. // // Ignores nil handlers. // Ignores handlers of event without parser. 
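With parser and handlers traveling together in NotificationHandlerInfo, the single registration call below replaces the old SetNotificationParser + RegisterNotificationHandler pair. A usage sketch mirroring the updated listener test; the contract hash, event type and handler body are illustrative:

l.RegisterNotificationHandler(event.NotificationHandlerInfo{
	Contract: util.Uint160{100},
	Type:     event.TypeFromString("NewEpoch"),
	Parser:   netmap.ParseNewEpoch,
	Handlers: []event.Handler{
		func(ctx context.Context, e event.Event) {
			// handleNewEpoch is a hypothetical application callback.
			handleNewEpoch(ctx, e.(netmap.NewEpoch))
		},
	},
})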
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { log := l.log.With( - zap.String("contract", hi.ScriptHash().StringLE()), - zap.Stringer("event_type", hi.GetType()), + zap.String("contract", hi.Contract.StringLE()), + zap.Stringer("event_type", hi.Type), ) - handler := hi.Handler() - if handler == nil { - log.Warn(context.Background(), logs.EventIgnoreNilEventHandler) - return - } - // check if parser was set - l.mtx.RLock() - _, ok := l.notificationParsers[hi.scriptHashWithType] - l.mtx.RUnlock() - - if !ok { - log.Warn(context.Background(), logs.EventIgnoreHandlerOfEventWoParser) - return - } - - // add event handler l.mtx.Lock() - l.notificationHandlers[hi.scriptHashWithType] = append( - l.notificationHandlers[hi.scriptHashWithType], - hi.Handler(), + defer l.mtx.Unlock() + + k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type} + + l.notificationParsers[k] = hi.Parser + l.notificationHandlers[k] = append( + l.notificationHandlers[k], + hi.Handlers..., ) - l.mtx.Unlock() log.Debug(context.Background(), logs.EventRegisteredNewEventHandler) } @@ -555,21 +487,9 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { zap.Stringer("notary_type", pi.RequestType()), ) - parser := pi.parser() - if parser == nil { - log.Info(context.Background(), logs.EventIgnoreNilNotaryEventParser) - return - } - l.mtx.Lock() defer l.mtx.Unlock() - // check if the listener was started - if l.started { - log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) - return - } - // add event parser if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok { l.notaryParsers[pi.notaryRequestTypes] = pi.parser() @@ -593,12 +513,6 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { zap.Stringer("notary type", hi.RequestType()), ) - handler := hi.Handler() - if handler == nil { - log.Warn(context.Background(), logs.EventIgnoreNilNotaryEventHandler) - return - } - // check if parser was set l.mtx.RLock() _, ok := l.notaryParsers[hi.notaryRequestTypes] @@ -627,11 +541,6 @@ func (l *listener) Stop() { } func (l *listener) RegisterBlockHandler(handler BlockHandler) { - if handler == nil { - l.log.Warn(context.Background(), logs.EventIgnoreNilBlockHandler) - return - } - l.blockHandlers = append(l.blockHandlers, handler) } @@ -648,7 +557,7 @@ func NewListener(p ListenerParams) (Listener, error) { // The default capacity is 0, which means "infinite". 
pool, err := ants.NewPool(p.WorkerPoolCapacity) if err != nil { - return nil, fmt.Errorf("could not init worker pool: %w", err) + return nil, fmt.Errorf("init worker pool: %w", err) } return &listener{ diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index c0f9722d7..87f37305f 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -39,29 +39,19 @@ func TestEventHandling(t *testing.T) { blockHandled <- true }) - key := scriptHashWithType{ - scriptHashValue: scriptHashValue{ - hash: util.Uint160{100}, - }, - typeValue: typeValue{ - typ: TypeFromString("notification type"), - }, - } - - l.SetNotificationParser(NotificationParserInfo{ - scriptHashWithType: key, - p: func(cne *state.ContainedNotificationEvent) (Event, error) { - return testNotificationEvent{source: cne}, nil - }, - }) - notificationHandled := make(chan bool) handledNotifications := make([]Event, 0) l.RegisterNotificationHandler(NotificationHandlerInfo{ - scriptHashWithType: key, - h: func(_ context.Context, e Event) { - handledNotifications = append(handledNotifications, e) - notificationHandled <- true + Contract: util.Uint160{100}, + Type: TypeFromString("notification type"), + Parser: func(cne *state.ContainedNotificationEvent) (Event, error) { + return testNotificationEvent{source: cne}, nil + }, + Handlers: []Handler{ + func(_ context.Context, e Event) { + handledNotifications = append(handledNotifications, e) + notificationHandled <- true + }, }, }) diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go index e454e2a6a..39c8f6237 100644 --- a/pkg/morph/event/netmap/epoch.go +++ b/pkg/morph/event/netmap/epoch.go @@ -1,9 +1,7 @@ package netmap import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -37,22 +35,13 @@ func (s NewEpoch) TxHash() util.Uint256 { // // Result is type of NewEpoch. 
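In NewListener above, handler concurrency still comes from an ants pool; per the retained comment, the default capacity 0 is treated as "infinite". Construction sketch (the wrapper is illustrative, the error text as in the diff):

import (
	"fmt"

	"github.com/panjf2000/ants/v2"
)

// newHandlerPool builds the listener's worker pool; a non-positive capacity
// places no limit on concurrently running event handlers.
func newHandlerPool(capacity int) (*ants.Pool, error) {
	pool, err := ants.NewPool(capacity)
	if err != nil {
		return nil, fmt.Errorf("init worker pool: %w", err)
	}
	return pool, nil
}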
func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != 1 { - return nil, event.WrongNumberOfParameters(1, ln) - } - - prmEpochNum, err := client.IntFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get integer epoch number: %w", err) + var nee netmap.NewEpochEvent + if err := nee.FromStackItem(e.Item); err != nil { + return nil, err } return NewEpoch{ - Num: uint64(prmEpochNum), + Num: nee.Epoch.Uint64(), Hash: e.Container, }, nil } diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go index bc267ecb6..6ff692327 100644 --- a/pkg/morph/event/netmap/epoch_test.go +++ b/pkg/morph/event/netmap/epoch_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -17,7 +16,7 @@ func TestParseNewEpoch(t *testing.T) { } _, err := ParseNewEpoch(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong first parameter type", func(t *testing.T) { diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go index 0260810b8..993182ab4 100644 --- a/pkg/morph/event/netmap/update_peer_notary.go +++ b/pkg/morph/event/netmap/update_peer_notary.go @@ -10,7 +10,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/vm/opcode" ) -var errNilPubKey = errors.New("could not parse public key: public key is nil") +var errNilPubKey = errors.New("public key is nil") func (s *UpdatePeer) setPublicKey(v []byte) (err error) { if v == nil { @@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) { s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256()) if err != nil { - return fmt.Errorf("could not parse public key: %w", err) + return fmt.Errorf("parse public key: %w", err) } return diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index 37091f768..b11973646 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { for { opCode, param, err = ctx.Next() if err != nil { - return nil, fmt.Errorf("could not get next opcode in script: %w", err) + return nil, fmt.Errorf("get next opcode in script: %w", err) } if opCode == opcode.RET { @@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { // retrieve contract's script hash contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param) if err != nil { - return nil, fmt.Errorf("could not decode contract hash: %w", err) + return nil, fmt.Errorf("decode contract hash: %w", err) } // retrieve contract's method @@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { if len(args) != 0 { err = p.validateParameterOpcodes(args) if err != nil { - return nil, fmt.Errorf("could not validate arguments: %w", err) + return nil, fmt.Errorf("validate arguments: %w", err) } // without args packing opcodes @@ -199,14 +199,14 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { // neo-go API) // // this check prevents notary flow recursion - if 
!(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 || - bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version + if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 && + !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version return ErrTXAlreadyHandled } currentAlphabet, err := p.alphaKeys() if err != nil { - return fmt.Errorf("could not fetch Alphabet public keys: %w", err) + return fmt.Errorf("fetch Alphabet public keys: %w", err) } err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet) @@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error { argsLen, err := IntFromOpcode(ops[l-2]) if err != nil { - return fmt.Errorf("could not parse argument len: %w", err) + return fmt.Errorf("parse argument len: %w", err) } err = validateNestedArgs(argsLen, ops[:l-2]) @@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error { argsLen, err := IntFromOpcode(ops[i-1]) if err != nil { - return fmt.Errorf("could not parse argument len: %w", err) + return fmt.Errorf("parse argument len: %w", err) } expArgLen += argsLen + 1 @@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error { currBlock, err := p.blockCounter.BlockCount() if err != nil { - return fmt.Errorf("could not fetch current chain height: %w", err) + return fmt.Errorf("fetch current chain height: %w", err) } if currBlock >= nvb.Height { @@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s []transaction.Signer, alph alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) if err != nil { - return fmt.Errorf("could not get Alphabet verification script: %w", err) + return fmt.Errorf("get Alphabet verification script: %w", err) } if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) { @@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) if err != nil { - return fmt.Errorf("could not get Alphabet verification script: %w", err) + return fmt.Errorf("get Alphabet verification script: %w", err) } // the second one must be witness of the current @@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu // the last one must be a placeholder for notary contract witness last := len(w) - 1 - if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981 - bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version + if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981 + !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version len(w[last].VerificationScript) != 0 { return errIncorrectNotaryPlaceholder } diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go index 90eff0bd2..5adeb4b30 100644 --- a/pkg/morph/event/parsers.go +++ b/pkg/morph/event/parsers.go @@ -11,15 +11,6 @@ import ( // from the StackItem list. type NotificationParser func(*state.ContainedNotificationEvent) (Event, error) -// NotificationParserInfo is a structure that groups -// the parameters of particular contract -// notification event parser. 
-type NotificationParserInfo struct { - scriptHashWithType - - p NotificationParser -} - // NotaryPreparator constructs NotaryEvent // from the NotaryRequest event. type NotaryPreparator interface { @@ -47,24 +38,6 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) { n.p = p } -// SetParser is an event parser setter. -func (s *NotificationParserInfo) SetParser(v NotificationParser) { - s.p = v -} - -func (s NotificationParserInfo) parser() NotificationParser { - return s.p -} - -// SetType is an event type setter. -func (s *NotificationParserInfo) SetType(v Type) { - s.typ = v -} - -func (s NotificationParserInfo) getType() Type { - return s.typ -} - type wrongPrmNumber struct { exp, act int } diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go index 28c968046..b384e436b 100644 --- a/pkg/morph/event/rolemanagement/designate.go +++ b/pkg/morph/event/rolemanagement/designate.go @@ -26,7 +26,7 @@ func (Designate) MorphEvent() {} func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) { params, err := event.ParseStackArray(e) if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + return nil, fmt.Errorf("parse stack items from notify event: %w", err) } if len(params) != 2 { diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 99ea9a7f0..0088be400 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -20,13 +20,9 @@ type scriptHashValue struct { hash util.Uint160 } -type typeValue struct { - typ Type -} - type scriptHashWithType struct { - scriptHashValue - typeValue + Hash util.Uint160 + Type Type } type notaryRequestTypes struct { @@ -73,16 +69,6 @@ func (s scriptHashValue) ScriptHash() util.Uint160 { return s.hash } -// SetType is an event type setter. -func (s *typeValue) SetType(v Type) { - s.typ = v -} - -// GetType is an event type getter. -func (s typeValue) GetType() Type { - return s.typ -} - // WorkerPoolHandler sets closure over worker pool w with passed handler h. func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler { return func(ctx context.Context, e Event) { @@ -91,7 +77,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle }) if err != nil { log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool, - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/network/address.go b/pkg/network/address.go index cb83a813d..4643eef15 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -2,11 +2,11 @@ package network import ( "errors" - "fmt" "net" "net/url" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ -44,11 +44,9 @@ func (a Address) equal(addr Address) bool { // See also FromString. 
func (a Address) URIAddr() string { _, host, err := manet.DialArgs(a.ma) - if err != nil { - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - panic(fmt.Errorf("could not get host addr: %w", err)) - } + // the only correct way to construct Address is AddressFromString + // which makes this error appear unexpected + assert.NoError(err, "could not get host addr") if !a.IsTLSEnabled() { return host diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 481d1ea4a..54c1e18fb 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -7,10 +7,12 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -62,12 +64,16 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpcOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), @@ -155,7 +161,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie group.IterateAddresses(func(addr network.Address) bool { select { case <-ctx.Done(): - firstErr = context.Canceled + firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled) return true default: } diff --git a/pkg/network/group.go b/pkg/network/group.go index 9843b14d4..0044fb2d4 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,6 +3,8 @@ package network import ( "errors" "fmt" + "iter" + "slices" "sort" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -67,9 +69,8 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // IterateAddresses must iterate over network addresses and pass each one - // to the handler until it returns true. - IterateAddresses(func(string) bool) + // Addresses must return an iterator over network addresses. + Addresses() iter.Seq[string] // NumberOfAddresses must return number of addresses in group. NumberOfAddresses() int @@ -130,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error { // iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f // until 1st parsing failure or f's error. 
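// A minimal sketch (not from the diff) of the new MultiAddressIterator
// contract introduced above: Addresses returns an iter.Seq[string], the
// Go 1.23+ range-over-func form, instead of taking a callback. The type
// name staticAddresses is illustrative.
package sketch

import "iter"

type staticAddresses []string

func (s staticAddresses) Addresses() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, a := range s {
			// yield reports false once the caller breaks out of its
			// range loop; returning here replaces the old convention of
			// the callback returning true to stop iteration.
			if !yield(a) {
				return
			}
		}
	}
}

func (s staticAddresses) NumberOfAddresses() int { return len(s) }

// Callers then iterate with plain range syntax:
//
//	for addr := range staticAddresses{"/dns4/node/tcp/8080"}.Addresses() { ... }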
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) { - iter.IterateAddresses(func(s string) bool { + for s := range iter.Addresses() { var a Address err = a.FromString(s) if err != nil { - err = fmt.Errorf("could not parse address from string: %w", err) - return true + return fmt.Errorf("could not parse address from string: %w", err) } err = f(a) - - return err != nil - }) + if err != nil { + return err + } + } return } @@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) { // at least one common address. func (x AddressGroup) Intersects(x2 AddressGroup) bool { for i := range x { - for j := range x2 { - if x[i].equal(x2[j]) { - return true - } + if slices.ContainsFunc(x2, x[i].equal) { + return true } } diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go index 5b335fa52..d08264533 100644 --- a/pkg/network/group_test.go +++ b/pkg/network/group_test.go @@ -1,6 +1,8 @@ package network import ( + "iter" + "slices" "sort" "testing" @@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) { type testIterator []string -func (t testIterator) IterateAddresses(f func(string) bool) { - for i := range t { - f(t[i]) - } +func (t testIterator) Addresses() iter.Seq[string] { + return slices.Values(t) } func (t testIterator) NumberOfAddresses() int { diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go index 49d083a90..8cbf8d9c3 100644 --- a/pkg/network/transport/container/grpc/service.go +++ b/pkg/network/transport/container/grpc/service.go @@ -80,3 +80,26 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil } + +type containerStreamerV2 struct { + containerGRPC.ContainerService_ListStreamServer +} + +func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error { + return s.ContainerService_ListStreamServer.Send( + resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse), + ) +} + +// ListStream converts the gRPC ListStreamRequest message, wraps the server-side stream, and forwards +// its data to the gRPC stream. +func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error { + listReq := new(container.ListStreamRequest) + if err := listReq.FromGRPCMessage(req); err != nil { + return err + } + + return s.srv.ListStream(listReq, &containerStreamerV2{ + ContainerService_ListStreamServer: gStream, + }) +} diff --git a/pkg/network/validation.go b/pkg/network/validation.go index 92f650119..b5157f28f 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -2,6 +2,7 @@ package network import ( "errors" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -34,8 +35,8 @@ var ( // MultiAddressIterator.
type NodeEndpointsIterator netmap.NodeInfo -func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) +func (x NodeEndpointsIterator) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() } func (x NodeEndpointsIterator) NumberOfAddresses() int { diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go index b77d3e3e6..6c2df8428 100644 --- a/pkg/services/accounting/morph/executor.go +++ b/pkg/services/accounting/morph/executor.go @@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor { } } -func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { +func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing account") @@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceReque return nil, fmt.Errorf("invalid account: %w", err) } - amount, err := s.client.BalanceOf(id) + amount, err := s.client.BalanceOf(ctx, id) if err != nil { return nil, err } - balancePrecision, err := s.client.Decimals() + balancePrecision, err := s.client.Decimals(ctx) if err != nil { return nil, err } diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go index e64f9a8d1..1d485321c 100644 --- a/pkg/services/apemanager/errors/errors.go +++ b/pkg/services/apemanager/errors/errors.go @@ -9,3 +9,9 @@ func ErrAPEManagerAccessDenied(reason string) error { err.WriteReason(reason) return err } + +func ErrAPEManagerInvalidArgument(msg string) error { + err := new(apistatus.InvalidArgument) + err.SetMessage(msg) + return err +} diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index cc792e23d..fc08fe569 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -22,6 +22,7 @@ import ( policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/mr-tron/base58/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" "go.uber.org/zap" ) @@ -34,6 +35,8 @@ type cfg struct { type Service struct { cfg + waiter Waiter + cnrSrc containercore.Source contractStorage ape_contract.ProxyAdaptedContractStorage @@ -41,11 +44,17 @@ type Service struct { type Option func(*cfg) -func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service { +type Waiter interface { + WaitTxHalt(context.Context, uint32, util.Uint256) error +} + +func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service { s := &Service{ cnrSrc: cnrSrc, contractStorage: contractStorage, + + waiter: waiter, } for i := range opts { @@ -69,12 +78,12 @@ var _ Server = (*Service)(nil) // validateContainerTargetRequest validates request for the container target. // It checks if request actor is the owner of the container, otherwise it denies the request. 
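// A sketch (hypothetical helper, not code from this PR) of the
// write-then-wait pattern that AddChain and RemoveChain adopt below:
// contract writes now return the transaction hash and its
// valid-until-block height (vub), and the request only succeeds once the
// injected Waiter reports the transaction halted. Types come from the
// imports already present in executor.go.
func addRuleChainSync(ctx context.Context, s *Service, target policy_engine.Target, ch *apechain.Chain) error {
	txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, ch)
	if err != nil {
		return err
	}
	// WaitTxHalt blocks until the transaction executes with HALT state
	// (or fails/expires), making the side-chain write effectively
	// synchronous from the caller's point of view.
	return s.waiter.WaitTxHalt(ctx, vub, txHash)
}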
-func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error { +func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { var cidSDK cidSDK.ID if err := cidSDK.DecodeString(cid); err != nil { - return fmt.Errorf("invalid CID format: %w", err) + return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err)) } - isOwner, err := s.isActorContainerOwner(cidSDK, pubKey) + isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) if err != nil { return fmt.Errorf("failed to check owner: %w", err) } @@ -84,7 +93,7 @@ func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.Public return nil } -func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { +func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -92,7 +101,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw()) if err != nil { - return nil, err + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error()) } if len(chain.ID) == 0 { const randomIDLength = 10 @@ -108,15 +117,19 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } - if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil { + txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) + if err != nil { + return nil, err + } + if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { return nil, err } @@ -129,7 +142,7 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) return resp, nil } -func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { +func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -140,15 +153,19 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", 
targetType)) } - if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil { + txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) + if err != nil { + return nil, err + } + if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { return nil, err } @@ -160,7 +177,7 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe return resp, nil } -func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { +func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -171,12 +188,12 @@ func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequ switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target) @@ -210,23 +227,23 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK } sig := vh.GetBodySignature() if sig == nil { - return nil, errEmptyBodySignature + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error()) } key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) if err != nil { - return nil, fmt.Errorf("invalid signature key: %w", err) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err)) } return key, nil } -func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { +func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - cnr, err := s.cnrSrc.Get(cid) + cnr, err := s.cnrSrc.Get(ctx, cid) if err != nil { return false, fmt.Errorf("get container error: %w", err) } diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index eb4fd03c7..eb6263320 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -1,6 +1,7 @@ package ape import ( + "context" "crypto/ecdsa" "errors" "fmt" @@ -11,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" @@ -20,7 +20,6 @@ import ( ) var ( - errInvalidTargetType = errors.New("bearer token defines non-container target override") errBearerExpired = errors.New("bearer token has expired") 
errBearerInvalidSignature = errors.New("bearer token has invalid signature") errBearerInvalidContainerID = errors.New("bearer token was created for another container") @@ -49,7 +48,7 @@ type CheckPrm struct { // CheckCore provides methods to perform the common logic of APE check. type CheckCore interface { // CheckAPE performs the common policy-engine check logic on a prepared request. - CheckAPE(prm CheckPrm) error + CheckAPE(ctx context.Context, prm CheckPrm) error } type checkerCoreImpl struct { @@ -71,22 +70,30 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora } // CheckAPE performs the common policy-engine check logic on a prepared request. -func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { +func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { var cr policyengine.ChainRouter - if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { + if prm.BearerToken != nil { var err error if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil { return fmt.Errorf("bearer validation error: %w", err) } - cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) + if prm.BearerToken.Impersonate() { + cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) + } else { + override, isSet := prm.BearerToken.APEOverride() + if !isSet { + return errors.New("expected APE override in bearer token") + } + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) + if err != nil { + return fmt.Errorf("create chain router error: %w", err) + } } } else { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) } - groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey) + groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -104,14 +111,7 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { if found && status == apechain.Allow { return nil } - err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String()) - return apeErr(err) -} - -func apeErr(err error) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(err.Error()) - return errAccessDenied + return newChainRouterError(prm.Request.Operation(), status) } // isValidBearer checks whether bearer token was correctly signed by authorized @@ -133,19 +133,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe } // Check for ape overrides defined in the bearer token. - apeOverride := token.APEOverride() - if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { - return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) - } - - // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !cntID.Equals(targetCnr) { - return errBearerInvalidContainerID + if apeOverride, isSet := token.APEOverride(); isSet { + switch apeOverride.Target.TargetType { + case ape.TargetTypeContainer: + var targetCnr cid.ID + err := targetCnr.DecodeString(apeOverride.Target.Name) + if err != nil { + return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) + } + if !cntID.Equals(targetCnr) { + return errBearerInvalidContainerID + } + default: + } } // Then check if container owner signed this token. @@ -157,8 +157,16 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe var usrSender user.ID user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner + // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's + // public key, but not the actual sender. + if !token.Impersonate() { + if !token.AssertUser(usrSender) { + return errBearerInvalidOwner + } + } else { + if !bearer.ResolveIssuer(*token).Equals(usrSender) { + return errBearerInvalidOwner + } } return nil diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go new file mode 100644 index 000000000..d3c381de7 --- /dev/null +++ b/pkg/services/common/ape/error.go @@ -0,0 +1,33 @@ +package ape + +import ( + "fmt" + + apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" +) + +// ChainRouterError is returned when chain router validation prevents +// the APE request from being processed (no rule found, access denied, etc.). +type ChainRouterError struct { + operation string + status apechain.Status +} + +func (e *ChainRouterError) Error() string { + return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status()) +} + +func (e *ChainRouterError) Operation() string { + return e.operation +} + +func (e *ChainRouterError) Status() apechain.Status { + return e.status +} + +func newChainRouterError(operation string, status apechain.Status) *ChainRouterError { + return &ChainRouterError{ + operation: operation, + status: status, + } +} diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 2cdb30b45..3b5dab9aa 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -49,11 +49,11 @@ var ( ) type ir interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type containers interface { - Get(cid.ID) (*containercore.Container, error) + Get(context.Context, cid.ID) (*containercore.Container, error) } type apeChecker struct { @@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err 
!= nil { return nil, err } @@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co } } - namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) + namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) if err != nil { return nil, fmt.Errorf("could not get owner namespace: %w", err) } - if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { + if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { return nil, err } @@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -175,11 +175,84 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co return nil, apeErr(nativeschema.MethodListContainers, s) } +func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error { + ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream") + defer span.End() + + role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + if err != nil { + return err + } + + reqProps := map[string]string{ + nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), + nativeschema.PropertyKeyActorRole: role, + } + + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) + if err != nil { + return err + } + if p, ok := peer.FromContext(ctx); ok { + if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { + reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() + } + } + + namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) + if err != nil { + return fmt.Errorf("could not get owner namespace: %w", err) + } + if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { + return err + } + + request := aperequest.NewRequest( + nativeschema.MethodListContainers, + aperequest.NewResource( + resourceName(namespace, ""), + make(map[string]string), + ), + reqProps, + ) + + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) + if err != nil { + return fmt.Errorf("failed to get group ids: %w", err) + } + + // Policy contract keeps group related chains as namespace-group pair. 
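// (Illustrative values: a group "devs" under namespace "ns1" is addressed
// by the single target name "ns1:devs"; the loop below builds exactly
// these pair names before querying the router.)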
+ for i := range groups { + groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) + } + + rt := policyengine.NewRequestTargetWithNamespace(namespace) + rt.User = &policyengine.Target{ + Type: policyengine.User, + Name: fmt.Sprintf("%s:%s", namespace, pk.Address()), + } + rt.Groups = make([]policyengine.Target, len(groups)) + for i := range groups { + rt.Groups[i] = policyengine.GroupTarget(groups[i]) + } + + s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request) + if err != nil { + return err + } + + if found && s == apechain.Allow { + return ac.next.ListStream(req, stream) + } + + return apeErr(nativeschema.MethodListContainers, s) +} + func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -189,7 +262,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, err } @@ -199,7 +272,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont } } - namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID()) + namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID()) if err != nil { return nil, fmt.Errorf("get namespace error: %w", err) } @@ -207,16 +280,21 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, err } + cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer()) + if err != nil { + return nil, fmt.Errorf("get container properties: %w", err) + } + request := aperequest.NewRequest( nativeschema.MethodPutContainer, aperequest.NewResource( resourceName(namespace, ""), - make(map[string]string), + cnrProps, ), reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -248,7 +326,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, apeErr(nativeschema.MethodPutContainer, s) } -func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { +func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { if vh == nil { return "", nil, errMissingVerificationHeader } @@ -271,7 +349,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(pkBytes) + isIR, err := ac.isInnerRingKey(ctx, pkBytes) if err != nil { return "", nil, err } @@ -292,7 +370,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con return err } - cont, err := ac.reader.Get(id) + cont, err := ac.reader.Get(ctx, id) if err != nil { return err } @@ -308,7 +386,7 @@ 
func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con namespace = cntNamespace } - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -322,7 +400,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con op, aperequest.NewResource( resourceName(namespace, id.EncodeToString()), - ac.getContainerProps(cont), + getContainerProps(cont), ), reqProps, ) @@ -372,10 +450,26 @@ func resourceName(namespace string, container string) string { return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container) } -func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string { - return map[string]string{ +func getContainerProps(c *containercore.Container) map[string]string { + props := map[string]string{ nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(), } + for attrName, attrVal := range c.Value.Attributes() { + name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName) + props[name] = attrVal + } + return props +} + +func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) { + if cnrV2 == nil { + return nil, errors.New("container is not set") + } + c := cnrSDK.Container{} + if err := c.ReadFromV2(*cnrV2); err != nil { + return nil, err + } + return getContainerProps(&containercore.Container{Value: c}), nil } func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, @@ -385,7 +479,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe if err != nil { return nil, nil, err } - role, err := ac.getRole(actor, pk, cont, cnrID) + role, err := ac.getRole(ctx, actor, pk, cont, cnrID) if err != nil { return nil, nil, err } @@ -393,7 +487,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, nil, err } @@ -405,13 +499,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe return reqProps, pk, nil } -func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { +func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { if cont.Value.Owner().Equals(*actor) { return nativeschema.PropertyValueContainerRoleOwner, nil } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(pkBytes) + isIR, err := ac.isInnerRingKey(ctx, pkBytes) if err != nil { return "", err } @@ -419,7 +513,7 @@ func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containe return nativeschema.PropertyValueContainerRoleIR, nil } - isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont) + isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont) if err != nil { return "", err } @@ -513,8 +607,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { - innerRingKeys, err := ac.ir.InnerRingKeys() +func (ac *apeChecker) isInnerRingKey(ctx 
context.Context, pk []byte) (bool, error) { + innerRingKeys, err := ac.ir.InnerRingKeys(ctx) if err != nil { return false, err } @@ -528,11 +622,11 @@ func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { return false, nil } -func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { +func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { binCnrID := make([]byte, sha256.Size) cnrID.Encode(binCnrID) - nm, err := netmap.GetLatestNetworkMap(ac.nm) + nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm) if err != nil { return false, err } @@ -543,7 +637,7 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = netmap.GetPreviousNetworkMap(ac.nm) + nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm) if err != nil { return false, err } @@ -568,7 +662,7 @@ func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containerc return false } -func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -576,24 +670,19 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr, err := ownerSDK.ScriptHash() - if err != nil { - return "", err - } + addr := ownerSDK.ScriptHash() namespace := "" - subject, err := ac.frostFSIDClient.GetSubject(addr) + subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err == nil { namespace = subject.Namespace - } else { - if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return "", fmt.Errorf("get subject error: %w", err) - } + } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { + return "", fmt.Errorf("get subject error: %w", err) } return namespace, nil } -func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -601,11 +690,8 @@ func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr, err := ownerSDK.ScriptHash() - if err != nil { - return "", err - } - subject, err := ac.frostFSIDClient.GetSubject(addr) + addr := ownerSDK.ScriptHash() + subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err != nil { return "", fmt.Errorf("get subject error: %w", err) } @@ -639,12 +725,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro // validateNamespace validates if a namespace of a request actor equals to owner's namespace. // An actor's namespace is calculated by a public key. 
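// A condensed sketch of the lookup namespaceByOwner performs above;
// resolveNamespace is a hypothetical helper, and frostFSIDClient stands
// for the dependency exposing GetSubject(ctx, util.Uint160) as used in
// the diff. Note that ScriptHash() no longer returns an error after the
// SDK update, and an unknown subject maps to the root namespace ""
// rather than failing the request.
func resolveNamespace(ctx context.Context, c frostFSIDClient, owner user.ID) (string, error) {
	subject, err := c.GetSubject(ctx, owner.ScriptHash())
	if err != nil {
		if strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
			return "", nil // not registered in frostfsid: root namespace
		}
		return "", fmt.Errorf("get subject error: %w", err)
	}
	return subject.Namespace, nil
}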
-func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error { +func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - actorNamespace, err := ac.namespaceByOwner(actorOwnerID) + actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID) if err != nil { return fmt.Errorf("could not get actor namespace: %w", err) } @@ -655,11 +741,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNa } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { +func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index b6b42a559..6438c34ca 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -54,6 +54,8 @@ func TestAPE(t *testing.T) { t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace) t.Run("deny list containers for owner with PK", testDenyListContainersForPK) t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError) + t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr) + t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr) } const ( @@ -564,6 +566,185 @@ func testDenyGetContainerByIP(t *testing.T) { require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) } +func testDenyGetContainerSysZoneAttr(t *testing.T) { + t.Parallel() + srv := &srvStub{ + calls: map[string]int{}, + } + router := inmemory.NewInMemory() + contRdr := &containerStub{ + c: map[cid.ID]*containercore.Container{}, + } + ir := &irStub{ + keys: [][]byte{}, + } + nm := &netmapStub{} + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + + frostfsIDSubjectReader := &frostfsidStub{ + subjects: map[util.Uint160]*client.Subject{ + pk.PublicKey().GetScriptHash(): { + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + }, + }, + subjectsExt: map[util.Uint160]*client.SubjectExtended{ + pk.PublicKey().GetScriptHash(): { + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + Groups: []*client.Group{ + { + ID: 19888, + }, + }, + }, + }, + } + + apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) + + contID := cidtest.ID() + testContainer := containertest.Container() + pp := netmap.PlacementPolicy{} + require.NoError(t, pp.DecodeString("REP 1")) + testContainer.SetPlacementPolicy(pp) + testContainer.SetAttribute(container.SysAttributeZone, "eggplant") + contRdr.c[contID] = &containercore.Container{Value: testContainer} + + nm.currentEpoch = 100 + nm.netmaps = map[uint64]*netmap.NetMap{} + var testNetmap netmap.NetMap + testNetmap.SetEpoch(nm.currentEpoch) + 
testNetmap.SetNodes([]netmap.NodeInfo{{}}) + nm.netmaps[nm.currentEpoch] = &testNetmap + nm.netmaps[nm.currentEpoch-1] = &testNetmap + + _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{ + Names: []string{ + nativeschema.MethodGetContainer, + }, + }, + Resources: chain.Resources{ + Names: []string{ + fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()), + }, + }, + Condition: []chain.Condition{ + { + Kind: chain.KindResource, + Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), + Value: "eggplant", + Op: chain.CondStringEquals, + }, + }, + }, + }, + }) + require.NoError(t, err) + + req := &container.GetRequest{} + req.SetBody(&container.GetRequestBody{}) + var refContID refs.ContainerID + contID.WriteToV2(&refContID) + req.GetBody().SetContainerID(&refContID) + + require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req)) + + resp, err := apeSrv.Get(ctxWithPeerInfo(), req) + require.Nil(t, resp) + var errAccessDenied *apistatus.ObjectAccessDenied + require.ErrorAs(t, err, &errAccessDenied) + require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) +} + +func testDenyPutContainerSysZoneAttr(t *testing.T) { + t.Parallel() + srv := &srvStub{ + calls: map[string]int{}, + } + router := inmemory.NewInMemory() + contRdr := &containerStub{ + c: map[cid.ID]*containercore.Container{}, + } + ir := &irStub{ + keys: [][]byte{}, + } + nm := &netmapStub{} + + contID := cidtest.ID() + testContainer := containertest.Container() + pp := netmap.PlacementPolicy{} + require.NoError(t, pp.DecodeString("REP 1")) + testContainer.SetPlacementPolicy(pp) + testContainer.SetAttribute(container.SysAttributeZone, "eggplant") + contRdr.c[contID] = &containercore.Container{Value: testContainer} + owner := testContainer.Owner() + ownerAddr := owner.ScriptHash() + + frostfsIDSubjectReader := &frostfsidStub{ + subjects: map[util.Uint160]*client.Subject{ + ownerAddr: {}, + }, + subjectsExt: map[util.Uint160]*client.SubjectExtended{ + ownerAddr: {}, + }, + } + + apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv) + + nm.currentEpoch = 100 + nm.netmaps = map[uint64]*netmap.NetMap{} + var testNetmap netmap.NetMap + testNetmap.SetEpoch(nm.currentEpoch) + testNetmap.SetNodes([]netmap.NodeInfo{{}}) + nm.netmaps[nm.currentEpoch] = &testNetmap + nm.netmaps[nm.currentEpoch-1] = &testNetmap + + _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{ + Names: []string{ + nativeschema.MethodPutContainer, + }, + }, + Resources: chain.Resources{ + Names: []string{ + nativeschema.ResourceFormatRootContainers, + }, + }, + Condition: []chain.Condition{ + { + Kind: chain.KindResource, + Key: fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone), + Value: "eggplant", + Op: chain.CondStringEquals, + }, + }, + }, + }, + }) + require.NoError(t, err) + + req := initPutRequest(t, testContainer) + + resp, err := apeSrv.Put(ctxWithPeerInfo(), req) + require.Nil(t, resp) + var errAccessDenied *apistatus.ObjectAccessDenied + require.ErrorAs(t, err, &errAccessDenied) + require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String()) +} + func testDenyGetContainerByGroupID(t 
*testing.T) { t.Parallel() srv := &srvStub{ @@ -678,8 +859,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { testContainer := containertest.Container() owner := testContainer.Owner() - ownerAddr, err := owner.ScriptHash() - require.NoError(t, err) + ownerAddr := owner.ScriptHash() frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ ownerAddr: {}, @@ -690,7 +870,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { nm.currentEpoch = 100 nm.netmaps = map[uint64]*netmap.NetMap{} - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ + _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ Rules: []chain.Rule{ { Status: chain.AccessDenied, @@ -773,7 +953,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(t, testContainer) + ownerScriptHash := initOwnerIDScriptHash(testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -857,7 +1037,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(t, testContainer) + ownerScriptHash := initOwnerIDScriptHash(testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -1079,6 +1259,11 @@ func (s *srvStub) List(context.Context, *container.ListRequest) (*container.List return &container.ListResponse{}, nil } +func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error { + s.calls["ListStream"]++ + return nil +} + func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) { s.calls["Put"]++ return &container.PutResponse{}, nil @@ -1088,7 +1273,7 @@ type irStub struct { keys [][]byte } -func (s *irStub) InnerRingKeys() ([][]byte, error) { +func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) { return s.keys, nil } @@ -1096,7 +1281,7 @@ type containerStub struct { c map[cid.ID]*containercore.Container } -func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) { +func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) { if v, ok := s.c[id]; ok { return v, nil } @@ -1108,21 +1293,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch() (uint64, error) { +func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } @@ -1131,7 +1316,7 @@ type frostfsidStub struct { subjectsExt map[util.Uint160]*client.SubjectExtended } -func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) { +func (f *frostfsidStub) GetSubject(ctx context.Context, 
owner util.Uint160) (*client.Subject, error) { s, ok := f.subjects[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1139,7 +1324,7 @@ func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) return s, nil } -func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) { s, ok := f.subjectsExt[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1527,26 +1712,21 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put return req } -func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 { +func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 { var ownerSDK *user.ID owner := testContainer.Owner() ownerSDK = &owner - sc, err := ownerSDK.ScriptHash() - require.NoError(t, err) - return sc + return ownerSDK.ScriptHash() } func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) { var actorUserID user.ID user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey())) - var err error - actorScriptHash, err = actorUserID.ScriptHash() - require.NoError(t, err) + actorScriptHash = actorUserID.ScriptHash() var ownerUserID user.ID user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey())) - ownerScriptHash, err = ownerUserID.ScriptHash() - require.NoError(t, err) + ownerScriptHash = ownerUserID.ScriptHash() require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String()) return } diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go index 411eb4863..b235efa3c 100644 --- a/pkg/services/container/audit.go +++ b/pkg/services/container/audit.go @@ -63,6 +63,17 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c return res, err } +// ListStream implements Server. +func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error { + err := a.next.ListStream(req, stream) + if !a.enabled.Load() { + return err + } + audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req, + audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) + return err +} + // Put implements Server. 
func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { res, err := a.next.Put(ctx, req) diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go index 70234d3de..cdd0d2514 100644 --- a/pkg/services/container/executor.go +++ b/pkg/services/container/executor.go @@ -14,6 +14,7 @@ type ServiceExecutor interface { Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error) Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error) List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error) + ListStream(context.Context, *container.ListStreamRequest, ListStream) error } type executorSvc struct { @@ -93,3 +94,11 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co s.respSvc.SetMeta(resp) return resp, nil } + +func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error { + err := s.exec.ListStream(stream.Context(), req, stream) + if err != nil { + return fmt.Errorf("could not execute ListStream request: %w", err) + } + return nil +} diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index 211f469f3..eaa608eba 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -29,7 +29,8 @@ type Reader interface { // ContainersOf returns a list of container identifiers belonging // to the specified user of FrostFS system. Returns the identifiers // of all FrostFS containers if pointer to owner identifier is nil. - ContainersOf(*user.ID) ([]cid.ID, error) + ContainersOf(context.Context, *user.ID) ([]cid.ID, error) + IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error } // Writer is an interface of container storage updater. 
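// A usage sketch for the new Reader.IterateContainersOf method declared
// above (collectContainers and the variable names are illustrative): the
// callback receives one container ID per invocation, and any non-nil
// return aborts the iteration early, which is how the morph executor's
// ListStream below reacts to context cancellation.
func collectContainers(ctx context.Context, rdr Reader, owner user.ID) ([]cid.ID, error) {
	var ids []cid.ID
	err := rdr.IterateContainersOf(ctx, &owner, func(id cid.ID) error {
		if err := ctx.Err(); err != nil {
			return err // cancelled or deadline exceeded: stop iterating
		}
		ids = append(ids, id)
		return nil
	})
	return ids, err
}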
@@ -132,7 +133,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body return new(container.DeleteResponseBody), nil } -func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { +func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -145,7 +146,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) ( return nil, fmt.Errorf("invalid container ID: %w", err) } - cnr, err := s.rdr.Get(id) + cnr, err := s.rdr.Get(ctx, id) if err != nil { return nil, err } @@ -172,7 +173,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) ( return res, nil } -func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { +func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errMissingUserID @@ -185,7 +186,7 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return nil, fmt.Errorf("invalid user ID: %w", err) } - cnrs, err := s.rdr.ContainersOf(&id) + cnrs, err := s.rdr.ContainersOf(ctx, &id) if err != nil { return nil, err } @@ -200,3 +201,56 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return res, nil } + +func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error { + body := req.GetBody() + idV2 := body.GetOwnerID() + if idV2 == nil { + return errMissingUserID + } + + var id user.ID + + err := id.ReadFromV2(*idV2) + if err != nil { + return fmt.Errorf("invalid user ID: %w", err) + } + + resBody := new(container.ListStreamResponseBody) + r := new(container.ListStreamResponse) + r.SetBody(resBody) + + var cidList []refs.ContainerID + + // Amount of containers to send at once. + const batchSize = 1000 + + processCID := func(id cid.ID) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var refID refs.ContainerID + id.WriteToV2(&refID) + cidList = append(cidList, refID) + if len(cidList) == batchSize { + r.GetBody().SetContainerIDs(cidList) + cidList = cidList[:0] + return stream.Send(r) + } + return nil + } + + if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil { + return err + } + + if len(cidList) > 0 { + r.GetBody().SetContainerIDs(cidList) + return stream.Send(r) + } + + return nil +} diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go index 78fd3d34c..d9208077d 100644 --- a/pkg/services/container/server.go +++ b/pkg/services/container/server.go @@ -3,6 +3,7 @@ package container import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" ) @@ -12,4 +13,11 @@ type Server interface { Get(context.Context, *container.GetRequest) (*container.GetResponse, error) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) List(context.Context, *container.ListRequest) (*container.ListResponse, error) + ListStream(*container.ListStreamRequest, ListStream) error +} + +// ListStream is an interface of FrostFS API v2 compatible search streamer. 
+type ListStream interface {
+	util.ServerStream
+	Send(*container.ListStreamResponse) error
 }
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index c478c0e1c..85fe7ae87 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -56,3 +56,40 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
 	resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
 	return resp, s.sigSvc.SignResponse(resp, err)
 }
+
+func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+	if err := s.sigSvc.VerifyRequest(req); err != nil {
+		resp := new(container.ListStreamResponse)
+		_ = s.sigSvc.SignResponse(resp, err)
+		return stream.Send(resp)
+	}
+
+	ss := &listStreamSigner{
+		ListStream: stream,
+		sigSvc: s.sigSvc,
+	}
+	err := s.svc.ListStream(req, ss)
+	if err != nil || !ss.nonEmptyResp {
+		return ss.send(new(container.ListStreamResponse), err)
+	}
+	return nil
+}
+
+type listStreamSigner struct {
+	ListStream
+	sigSvc *util.SignService
+
+	nonEmptyResp bool // set on first Send call
+}
+
+func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error {
+	s.nonEmptyResp = true
+	return s.send(resp, nil)
+}
+
+func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error {
+	if err := s.sigSvc.SignResponse(resp, err); err != nil {
+		return err
+	}
+	return s.ListStream.Send(resp)
+}
diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go
new file mode 100644
index 000000000..4f8708da7
--- /dev/null
+++ b/pkg/services/container/transport_splitter.go
@@ -0,0 +1,92 @@
+package container
+
+import (
+	"context"
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+)
+
+type (
+	TransportSplitter struct {
+		next Server
+
+		respSvc *response.Service
+		cnrAmount uint32
+	}
+
+	listStreamMsgSizeCtrl struct {
+		util.ServerStream
+		stream ListStream
+		respSvc *response.Service
+		cnrAmount uint32
+	}
+)
+
+func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server {
+	return &TransportSplitter{
+		next: next,
+		respSvc: respSvc,
+		cnrAmount: cnrAmount,
+	}
+}
+
+func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
+	return s.next.Put(ctx, req)
+}
+
+func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
+	return s.next.Delete(ctx, req)
+}
+
+func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
+	return s.next.Get(ctx, req)
+}
+
+func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
+	return s.next.List(ctx, req)
+}
+
+func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error {
+	return s.next.ListStream(req, &listStreamMsgSizeCtrl{
+		ServerStream: stream,
+		stream: stream,
+		respSvc: s.respSvc,
+		cnrAmount: s.cnrAmount,
+	})
+}
+
+func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error {
+	s.respSvc.SetMeta(resp)
+	body := resp.GetBody()
+	ids := body.GetContainerIDs()
+
+	var newResp *container.ListStreamResponse
+
+	for {
+		if newResp == nil {
+			newResp = new(container.ListStreamResponse)
+			newResp.SetBody(body)
+		}
+
+		cut := min(s.cnrAmount, uint32(len(ids)))
+
+		body.SetContainerIDs(ids[:cut])
+		newResp.SetMetaHeader(resp.GetMetaHeader())
+		newResp.SetVerificationHeader(resp.GetVerificationHeader())
+
+		if err := s.stream.Send(newResp); err != nil {
+			return fmt.Errorf("TransportSplitter: %w", err)
+		}
+
+		ids = ids[cut:]
+
+		if len(ids) == 0 {
+			break
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index e2c385c6a..0509d2646 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -48,7 +48,7 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (
 	resp := new(control.TickEpochResponse)
 	resp.SetBody(new(control.TickEpochResponse_Body))
 
-	epoch, err := s.netmapClient.Epoch()
+	epoch, err := s.netmapClient.Epoch(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("getting current epoch: %w", err)
 	}
@@ -77,7 +77,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest)
 	resp := new(control.RemoveNodeResponse)
 	resp.SetBody(new(control.RemoveNodeResponse_Body))
 
-	nm, err := s.netmapClient.NetMap()
+	nm, err := s.netmapClient.NetMap(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("getting netmap: %w", err)
 	}
@@ -138,7 +138,7 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain
 		return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error())
 	}
 
-	cids, err := s.containerClient.ContainersOf(&owner)
+	cids, err := s.containerClient.ContainersOf(ctx, &owner)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get owner's containers: %w", err)
 	}
diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go
index c2a4f88a6..0cfca71c1 100644
--- a/pkg/services/control/ir/server/server.go
+++ b/pkg/services/control/ir/server/server.go
@@ -35,8 +35,7 @@ func panicOnPrmValue(n string, v any) {
 // the parameterized private key.
 func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server {
 	// verify required parameters
-	switch {
-	case prm.healthChecker == nil:
+	if prm.healthChecker == nil {
 		panicOnPrmValue("health checker", prm.healthChecker)
 	}
 
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 514061db4..0c4236d0e 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -1,6 +1,8 @@
 package control
 
 import (
+	"context"
+
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common"
 )
@@ -15,7 +17,6 @@ const (
 	rpcListShards = "ListShards"
 	rpcSetShardMode = "SetShardMode"
 	rpcSynchronizeTree = "SynchronizeTree"
-	rpcEvacuateShard = "EvacuateShard"
 	rpcStartShardEvacuation = "StartShardEvacuation"
 	rpcGetShardEvacuationStatus = "GetShardEvacuationStatus"
 	rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus"
@@ -31,6 +32,7 @@ const (
 	rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
 	rpcDetachShards = "DetachShards"
 	rpcStartShardRebuild = "StartShardRebuild"
+	rpcListShardsForObject = "ListShardsForObject"
 )
 
 // HealthCheck executes ControlService.HealthCheck RPC.
@@ -74,6 +76,7 @@
 // GetNetmapStatus executes ControlService.GetNetmapStatus RPC.
 func GetNetmapStatus(
+	_ context.Context,
 	cli *client.Client,
 	req *GetNetmapStatusRequest,
 	opts ...client.CallOption,
@@ -162,19 +165,6 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl
 	return wResp.message, nil
 }
 
-// EvacuateShard executes ControlService.EvacuateShard RPC.
-func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) {
-	wResp := newResponseWrapper[EvacuateShardResponse]()
-	wReq := &requestWrapper{m: req}
-
-	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	return wResp.message, nil
-}
-
 // StartShardEvacuation executes ControlService.StartShardEvacuation RPC.
 func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) {
 	wResp := newResponseWrapper[StartShardEvacuationResponse]()
@@ -375,3 +365,22 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts .
 
 	return wResp.message, nil
 }
+
+// ListShardsForObject executes ControlService.ListShardsForObject RPC.
+func ListShardsForObject(
+	cli *client.Client,
+	req *ListShardsForObjectRequest,
+	opts ...client.CallOption,
+) (*ListShardsForObjectResponse, error) {
+	wResp := newResponseWrapper[ListShardsForObjectResponse]()
+
+	wReq := &requestWrapper{
+		m: req,
+	}
+	err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	return wResp.message, nil
+}
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
deleted file mode 100644
index ae3413373..000000000
--- a/pkg/services/control/server/evacuate.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package control
-
-import (
-	"bytes"
-	"context"
-	"crypto/sha256"
-	"encoding/hex"
-	"errors"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes")
-
-func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
-	err := s.isValidRequest(req)
-	if err != nil {
-		return nil, status.Error(codes.PermissionDenied, err.Error())
-	}
-
-	prm := engine.EvacuateShardPrm{
-		ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
-		IgnoreErrors: req.GetBody().GetIgnoreErrors(),
-		ObjectsHandler: s.replicateObject,
-		Scope: engine.EvacuateScopeObjects,
-	}
-
-	res, err := s.s.Evacuate(ctx, prm)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
-	resp := &control.EvacuateShardResponse{
-		Body: &control.EvacuateShardResponse_Body{
-			Count: uint32(res.ObjectsEvacuated()),
-		},
-	}
-
-	err = ctrlmessage.Sign(s.key, resp)
-	if err != nil {
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-	return resp, nil
-}
-
-func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
-	cid, ok := obj.ContainerID()
-	if !ok {
-		// Return nil to prevent situations where a shard can't be evacuated
-		// because of a single bad/corrupted object.
-		return false, nil
-	}
-
-	nodes, err := s.getContainerNodes(cid)
-	if err != nil {
-		return false, err
-	}
-
-	if len(nodes) == 0 {
-		return false, nil
-	}
-
-	var res replicatorResult
-	task := replicator.Task{
-		NumCopies: 1,
-		Addr: addr,
-		Obj: obj,
-		Nodes: nodes,
-	}
-	s.replicator.HandleReplicationTask(ctx, task, &res)
-
-	if res.count == 0 {
-		return false, errors.New("object was not replicated")
-	}
-	return true, nil
-}
-
-func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) {
-	nodes, err := s.getContainerNodes(contID)
-	if err != nil {
-		return false, "", err
-	}
-	if len(nodes) == 0 {
-		return false, "", nil
-	}
-
-	for _, node := range nodes {
-		err = s.replicateTreeToNode(ctx, forest, contID, treeID, node)
-		if err == nil {
-			return true, hex.EncodeToString(node.PublicKey()), nil
-		}
-	}
-	return false, "", err
-}
-
-func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error {
-	rawCID := make([]byte, sha256.Size)
-	contID.Encode(rawCID)
-
-	var height uint64
-	for {
-		op, err := forest.TreeGetOpLog(ctx, contID, treeID, height)
-		if err != nil {
-			return err
-		}
-
-		if op.Time == 0 {
-			return nil
-		}
-
-		req := &tree.ApplyRequest{
-			Body: &tree.ApplyRequest_Body{
-				ContainerId: rawCID,
-				TreeId: treeID,
-				Operation: &tree.LogMove{
-					ParentId: op.Parent,
-					Meta: op.Meta.Bytes(),
-					ChildId: op.Child,
-				},
-			},
-		}
-
-		err = tree.SignMessage(req, s.key)
-		if err != nil {
-			return fmt.Errorf("can't message apply request: %w", err)
-		}
-
-		err = s.treeService.ReplicateTreeOp(ctx, node, req)
-		if err != nil {
-			return err
-		}
-
-		height = op.Time + 1
-	}
-}
-
-func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
-	nm, err := s.netMapSrc.GetNetMap(0)
-	if err != nil {
-		return nil, err
-	}
-
-	c, err := s.cnrSrc.Get(contID)
-	if err != nil {
-		return nil, err
-	}
-
-	binCnr := make([]byte, sha256.Size)
-	contID.Encode(binCnr)
-
-	ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr)
-	if err != nil {
-		return nil, errFailedToBuildListOfContainerNodes
-	}
-
-	nodes := placement.FlattenNodes(ns)
-	bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
-	for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
-		if bytes.Equal(nodes[i].PublicKey(), bs) {
-			copy(nodes[i:], nodes[i+1:])
-			nodes = nodes[:len(nodes)-1]
-		}
-	}
-	return nodes, nil
-}
-
-type replicatorResult struct {
-	count int
-}
-
-// SubmitSuccessfulReplication implements the replicator.TaskResult interface.
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) { - r.count++ -} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index 146ac7e16..f3ba9015e 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -1,17 +1,32 @@ package control import ( + "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" + "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) +var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes") + func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) { err := s.isValidRequest(req) if err != nil { @@ -27,15 +42,13 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha IgnoreErrors: req.GetBody().GetIgnoreErrors(), ObjectsHandler: s.replicateObject, TreeHandler: s.replicateTree, - Async: true, Scope: engine.EvacuateScope(req.GetBody().GetScope()), ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(), ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(), RepOneOnly: req.GetBody().GetRepOneOnly(), } - _, err = s.s.Evacuate(ctx, prm) - if err != nil { + if err = s.s.Evacuate(ctx, prm); err != nil { var logicalErr logicerr.Logical if errors.As(err, &logicalErr) { return nil, status.Error(codes.Aborted, err.Error()) @@ -135,3 +148,133 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re } return resp, nil } + +func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { + cid, ok := obj.ContainerID() + if !ok { + // Return nil to prevent situations where a shard can't be evacuated + // because of a single bad/corrupted object. 
+ return false, nil + } + + nodes, err := s.getContainerNodes(ctx, cid) + if err != nil { + return false, err + } + + if len(nodes) == 0 { + return false, nil + } + + var res replicatorResult + task := replicator.Task{ + NumCopies: 1, + Addr: addr, + Obj: obj, + Nodes: nodes, + } + s.replicator.HandleReplicationTask(ctx, task, &res) + + if res.count == 0 { + return false, errors.New("object was not replicated") + } + return true, nil +} + +func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) { + nodes, err := s.getContainerNodes(ctx, contID) + if err != nil { + return false, "", err + } + if len(nodes) == 0 { + return false, "", nil + } + + for _, node := range nodes { + err = s.replicateTreeToNode(ctx, forest, contID, treeID, node) + if err == nil { + return true, hex.EncodeToString(node.PublicKey()), nil + } + } + return false, "", err +} + +func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error { + rawCID := make([]byte, sha256.Size) + contID.Encode(rawCID) + + var height uint64 + for { + op, err := forest.TreeGetOpLog(ctx, contID, treeID, height) + if err != nil { + return err + } + + if op.Time == 0 { + return nil + } + + req := &tree.ApplyRequest{ + Body: &tree.ApplyRequest_Body{ + ContainerId: rawCID, + TreeId: treeID, + Operation: &tree.LogMove{ + ParentId: op.Parent, + Meta: op.Bytes(), + ChildId: op.Child, + }, + }, + } + + err = tree.SignMessage(req, s.key) + if err != nil { + return fmt.Errorf("can't message apply request: %w", err) + } + + err = s.treeService.ReplicateTreeOp(ctx, node, req) + if err != nil { + return err + } + + height = op.Time + 1 + } +} + +func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) { + nm, err := s.netMapSrc.GetNetMap(ctx, 0) + if err != nil { + return nil, err + } + + c, err := s.cnrSrc.Get(ctx, contID) + if err != nil { + return nil, err + } + + binCnr := make([]byte, sha256.Size) + contID.Encode(binCnr) + + ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr) + if err != nil { + return nil, errFailedToBuildListOfContainerNodes + } + + nodes := placement.FlattenNodes(ns) + bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes() + for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body + if bytes.Equal(nodes[i].PublicKey(), bs) { + copy(nodes[i:], nodes[i+1:]) + nodes = nodes[:len(nodes)-1] + } + } + return nodes, nil +} + +type replicatorResult struct { + count int +} + +// SubmitSuccessfulReplication implements the replicator.TaskResult interface. 
+func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) {
+	r.count++
+}
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index d9fefc38e..a8ef7809e 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -42,8 +42,7 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
 		prm.WithForceRemoval()
 		prm.WithAddress(addrList[i])
 
-		_, err := s.s.Delete(ctx, prm)
-		if err != nil && firstErr == nil {
+		if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil {
 			firstErr = err
 		}
 	}
diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go
index 1c038253a..5e0496910 100644
--- a/pkg/services/control/server/get_netmap_status.go
+++ b/pkg/services/control/server/get_netmap_status.go
@@ -10,12 +10,12 @@
 )
 
 // GetNetmapStatus gets node status in FrostFS network.
-func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
+func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) {
 	if err := s.isValidRequest(req); err != nil {
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
 
-	st, epoch, err := s.nodeState.GetNetmapStatus()
+	st, epoch, err := s.nodeState.GetNetmapStatus(ctx)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go
new file mode 100644
index 000000000..39565ed50
--- /dev/null
+++ b/pkg/services/control/server/list_shards_for_object.go
@@ -0,0 +1,65 @@
+package control
+
+import (
+	"context"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) {
+	err := s.isValidRequest(req)
+	if err != nil {
+		return nil, status.Error(codes.PermissionDenied, err.Error())
+	}
+
+	var obj oid.ID
+	err = obj.DecodeString(req.GetBody().GetObjectId())
+	if err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	}
+
+	var cnr cid.ID
+	err = cnr.DecodeString(req.GetBody().GetContainerId())
+	if err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	}
+
+	resp := new(control.ListShardsForObjectResponse)
+	body := new(control.ListShardsForObjectResponse_Body)
+	resp.SetBody(body)
+
+	var objAddr oid.Address
+	objAddr.SetContainer(cnr)
+	objAddr.SetObject(obj)
+	info, err := s.s.ListShardsForObject(ctx, objAddr)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	if len(info) == 0 {
+		return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject)
+	}
+
+	body.SetShard_ID(shardInfoToProto(info))
+
+	// Sign the response
+	if err := ctrlmessage.Sign(s.key, resp); err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	return resp, nil
+}
+
+func shardInfoToProto(infos []shard.Info) [][]byte {
+	shardInfos := make([][]byte, 0, len(infos))
+	for _, info := range infos {
+		shardInfos = append(shardInfos, *info.ID)
+	}
+
+	return shardInfos
+}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index 94aa1ff5b..59d701bc6 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -52,7 +52,7 @@ type NodeState interface {
 	// but starts local maintenance regardless of the network settings.
 	ForceMaintenance(ctx context.Context) error
 
-	GetNetmapStatus() (control.NetmapStatus, uint64, error)
+	GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error)
 }
 
 // LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index ae1939e13..4c539acfc 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -30,11 +30,6 @@ service ControlService {
   // Synchronizes all log operations for the specified tree.
   rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
 
-  // EvacuateShard moves all data from one shard to the others.
-  // Deprecated: Use
-  // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
-  rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse);
-
   // StartShardEvacuation starts moving all data from one shard to the others.
   rpc StartShardEvacuation(StartShardEvacuationRequest) returns (StartShardEvacuationResponse);
 
@@ -94,6 +89,9 @@ service ControlService {
 
   // StartShardRebuild starts shard rebuild process.
   rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
+
+  // ListShardsForObject returns shard info where object is stored.
+  rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse);
 }
 
 // Health check request.
@@ -734,3 +732,23 @@ message StartShardRebuildResponse {
 
   Signature signature = 2;
 }
+
+message ListShardsForObjectRequest {
+  message Body {
+    string object_id = 1;
+    string container_id = 2;
+  }
+
+  Body body = 1;
+  Signature signature = 2;
+}
+
+message ListShardsForObjectResponse {
+  message Body {
+    // List of the node's shards storing object.
+    repeated bytes shard_ID = 1;
+  }
+
+  Body body = 1;
+  Signature signature = 2;
+}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 0b4e3cf32..44849d591 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -17303,3 +17303,727 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
 		in.Consumed()
 	}
 }
+
+type ListShardsForObjectRequest_Body struct {
+	ObjectId string `json:"objectId"`
+	ContainerId string `json:"containerId"`
+}
+
+var (
+	_ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil)
+	_ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+	_ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil)
+	_ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *ListShardsForObjectRequest_Body) StableSize() (size int) {
+	if x == nil {
+		return 0
+	}
+	size += proto.StringSize(1, x.ObjectId)
+	size += proto.StringSize(2, x.ContainerId)
+	return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ObjectId) != 0 { + mm.AppendString(1, x.ObjectId) + } + if len(x.ContainerId) != 0 { + mm.AppendString(2, x.ContainerId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body") + } + switch fc.FieldNum { + case 1: // ObjectId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ObjectId") + } + x.ObjectId = data + case 2: // ContainerId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + } + } + return nil +} +func (x *ListShardsForObjectRequest_Body) GetObjectId() string { + if x != nil { + return x.ObjectId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) { + x.ObjectId = v +} +func (x *ListShardsForObjectRequest_Body) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) { + x.ContainerId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"objectId\":" + out.RawString(prefix) + out.String(x.ObjectId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + out.String(x.ContainerId) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "objectId": + { + var f string + f = in.String() + x.ObjectId = f + } + case "containerId": + { + var f string + f = in.String() + x.ContainerId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectRequest struct { + Body *ListShardsForObjectRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectRequest) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) { + x.Body = v +} +func (x *ListShardsForObjectRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectRequest_Body + f = new(ListShardsForObjectRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse_Body struct { + Shard_ID [][]byte `json:"shardID"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. 
+func (x *ListShardsForObjectResponse_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.RepeatedBytesSize(1, x.Shard_ID) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + } + } + return nil +} +func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse struct { + Body *ListShardsForObjectResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectResponse) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectResponse) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) { + x.Body = v +} +func (x *ListShardsForObjectResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectResponse_Body + f = new(ListShardsForObjectResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index f5cfefa85..045662ccf 100644 --- a/pkg/services/control/service_grpc.pb.go +++ b/pkg/services/control/service_grpc.pb.go @@ -26,7 +26,6 @@ const ( ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards" ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode" ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree" - ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard" ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation" ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus" ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus" @@ -42,6 +41,7 @@ const ( ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache" ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards" ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild" + ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject" ) // ControlServiceClient is the client API for ControlService service. @@ -62,10 +62,6 @@ type ControlServiceClient interface { SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) // Synchronizes all log operations for the specified tree. SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) // StartShardEvacuation starts moving all data from one shard to the others. StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) // GetShardEvacuationStatus returns evacuation status. @@ -100,6 +96,8 @@ type ControlServiceClient interface { DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. 
+ ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) } type controlServiceClient struct { @@ -173,15 +171,6 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron return out, nil } -func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) { - out := new(EvacuateShardResponse) - err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) { out := new(StartShardEvacuationResponse) err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...) @@ -317,6 +306,15 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS return out, nil } +func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) { + out := new(ListShardsForObjectResponse) + err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ControlServiceServer is the server API for ControlService service. // All implementations should embed UnimplementedControlServiceServer // for forward compatibility @@ -335,10 +333,6 @@ type ControlServiceServer interface { SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) // Synchronizes all log operations for the specified tree. SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) // StartShardEvacuation starts moving all data from one shard to the others. StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) // GetShardEvacuationStatus returns evacuation status. @@ -373,6 +367,8 @@ type ControlServiceServer interface { DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) } // UnimplementedControlServiceServer should be embedded to have forward compatible implementations. 
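Note: for orientation, this is how the new control RPC could be driven end to end from the generated client. NewControlServiceClient is the standard constructor emitted by protoc-gen-go-grpc for this service; the helper name, the omitted request signing, and the already-dialed conn are illustrative assumptions, not part of this patch. A real caller must sign the request body (see ctrlmessage.Sign in list_shards_for_object.go above), otherwise the server answers with codes.PermissionDenied:

	// listShardsForObject asks one storage node which of its shards hold the
	// given object. IDs travel in string form, as the request body defines.
	func listShardsForObject(ctx context.Context, conn *grpc.ClientConn, cnrID, objID string) ([][]byte, error) {
		req := &control.ListShardsForObjectRequest{
			Body: &control.ListShardsForObjectRequest_Body{
				ObjectId: objID,
				ContainerId: cnrID,
			},
		}
		// Request signing is omitted here for brevity.
		resp, err := control.NewControlServiceClient(conn).ListShardsForObject(ctx, req)
		if err != nil {
			return nil, err
		}
		// Each entry is a raw shard ID as carried in the response body.
		return resp.GetBody().GetShard_ID(), nil
	}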
@@ -400,9 +396,6 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard
 func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
 }
-func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented")
-}
 func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented")
 }
@@ -448,6 +441,9 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh
 func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
 }
+func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented")
+}
 
 // UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
 // Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -586,24 +582,6 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
 	return interceptor(ctx, in, info, handler)
 }
 
-func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(EvacuateShardRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ControlServiceServer).EvacuateShard(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: ControlService_EvacuateShard_FullMethodName,
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
 func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
 	in := new(StartShardEvacuationRequest)
 	if err := dec(in); err != nil {
@@ -874,6 +852,24 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont
 	return interceptor(ctx, in, info, handler)
 }
 
+func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListShardsForObjectRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControlServiceServer).ListShardsForObject(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: ControlService_ListShardsForObject_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 // ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
 // It's only intended for direct use with grpc.RegisterService,
 // and not to be introspected or modified (even as a copy)
@@ -909,10 +905,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
 			MethodName: "SynchronizeTree",
 			Handler: _ControlService_SynchronizeTree_Handler,
 		},
-		{
-			MethodName: "EvacuateShard",
-			Handler: _ControlService_EvacuateShard_Handler,
-		},
 		{
 			MethodName: "StartShardEvacuation",
 			Handler: _ControlService_StartShardEvacuation_Handler,
@@ -973,6 +965,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
 			MethodName: "StartShardRebuild",
 			Handler: _ControlService_StartShardRebuild_Handler,
 		},
+		{
+			MethodName: "ListShardsForObject",
+			Handler: _ControlService_ListShardsForObject_Handler,
+		},
 	},
 	Streams: []grpc.StreamDesc{},
 	Metadata: "pkg/services/control/service.proto",
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 5223047df..1b92fdaad 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
@@ -42,14 +43,16 @@ type NetworkInfo interface {
 	// Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
 	//
 	// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
-	Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
+	Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error)
 }
 
 func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server {
-	if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil {
-		// this should never happen, otherwise it programmers bug
-		panic("can't create netmap execution service")
-	}
+	// this should never happen, otherwise it's a programmer's bug
+	msg := "BUG: can't create netmap execution service"
+	assert.False(s == nil, msg, "node state is nil")
+	assert.False(netInfo == nil, msg, "network info is nil")
+	assert.False(respSvc == nil, msg, "response service is nil")
+	assert.True(version.IsValid(v), msg, "invalid version")
 
 	res := &executorSvc{
 		state: s,
@@ -82,7 +85,7 @@ func (s *executorSvc) LocalNodeInfo(
 }
 
 func (s *executorSvc) NetworkInfo(
-	_ context.Context,
+	ctx context.Context,
 	req *netmap.NetworkInfoRequest,
 ) (*netmap.NetworkInfoResponse, error) {
 	verV2 := req.GetMetaHeader().GetVersion()
@@ -95,7 +98,7 @@ func (s *executorSvc) NetworkInfo(
 		return nil, fmt.Errorf("can't read version: %w", err)
 	}
 
-	ni, err := s.netInfo.Dump(ver)
+	ni, err := s.netInfo.Dump(ctx, ver)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
deleted file mode 100644
index 94e015abe..000000000
--- a/pkg/services/object/acl/eacl/v2/eacl_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package v2
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"errors"
-	"testing"
-
-	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
-	eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
-	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/stretchr/testify/require"
-)
-
-type testLocalStorage struct {
-	t *testing.T
-
-	expAddr oid.Address
-
-	obj *objectSDK.Object
-
-	err error
-}
-
-func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
-	require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
-	require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
-
-	return s.obj, s.err
-}
-
-func testXHeaders(strs ...string) []session.XHeader {
-	res := make([]session.XHeader, len(strs)/2)
-
-	for i := 0; i < len(strs); i += 2 {
-		res[i/2].SetKey(strs[i])
-		res[i/2].SetValue(strs[i+1])
-	}
-
-	return res
-}
-
-func TestHeadRequest(t *testing.T) {
-	req := new(objectV2.HeadRequest)
-
-	meta := new(session.RequestMetaHeader)
-	req.SetMetaHeader(meta)
-
-	body := new(objectV2.HeadRequestBody)
-	req.SetBody(body)
-
-	addr := oidtest.Address()
-
-	var addrV2 refs.Address
-	addr.WriteToV2(&addrV2)
-
-	body.SetAddress(&addrV2)
-
-	xKey := "x-key"
-	xVal := "x-val"
-	xHdrs := testXHeaders(
-		xKey, xVal,
-	)
-
-	meta.SetXHeaders(xHdrs)
-
-	obj := objectSDK.New()
-
-	attrKey := "attr_key"
-	attrVal := "attr_val"
-	var attr objectSDK.Attribute
-	attr.SetKey(attrKey)
-	attr.SetValue(attrVal)
-	obj.SetAttributes(attr)
-
-	table := new(eaclSDK.Table)
-
-	priv, err := keys.NewPrivateKey()
-	require.NoError(t, err)
-	senderKey := priv.PublicKey()
-
-	r := eaclSDK.NewRecord()
-	r.SetOperation(eaclSDK.OperationHead)
-	r.SetAction(eaclSDK.ActionDeny)
-	r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
-	r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
-	eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
-
-	table.AddRecord(r)
-
-	lStorage := &testLocalStorage{
-		t: t,
-		expAddr: addr,
-		obj: obj,
-	}
-
-	id := addr.Object()
-
-	newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
-		hdrSrc, err := NewMessageHeaderSource(
-			lStorage,
-			NewRequestXHeaderSource(req),
-			addr.Container(),
-			WithOID(&id))
-		require.NoError(t, err)
-		return hdrSrc
-	}
-
-	cnr := addr.Container()
-
-	unit := new(eaclSDK.ValidationUnit).
-		WithContainerID(&cnr).
-		WithOperation(eaclSDK.OperationHead).
-		WithSenderKey(senderKey.Bytes()).
- WithEACLTable(table) - - validator := eaclSDK.NewValidator() - - checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(nil) - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(xHdrs) - - obj.SetAttributes() - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - lStorage.err = errors.New("any error") - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - r.SetAction(eaclSDK.ActionAllow) - - rID := eaclSDK.NewRecord() - rID.SetOperation(eaclSDK.OperationHead) - rID.SetAction(eaclSDK.ActionDeny) - rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object()) - eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table = eaclSDK.NewTable() - table.AddRecord(r) - table.AddRecord(rID) - - unit.WithEACLTable(table) - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) -} - -func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.True(t, fromRule) - require.Equal(t, expected, actual) -} - -func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.False(t, fromRule) - require.Equal(t, eaclSDK.ActionAllow, actual) -} diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go deleted file mode 100644 index ecb793df8..000000000 --- a/pkg/services/object/acl/eacl/v2/headers.go +++ /dev/null @@ -1,246 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type Option func(*cfg) - -type cfg struct { - storage ObjectStorage - - msg XHeaderSource - - cnr cid.ID - obj *oid.ID -} - -type ObjectStorage interface { - Head(context.Context, oid.Address) (*objectSDK.Object, error) -} - -type Request interface { - GetMetaHeader() *session.RequestMetaHeader -} - -type Response interface { - GetMetaHeader() *session.ResponseMetaHeader -} - -type headerSource struct { - requestHeaders []eaclSDK.Header - objectHeaders []eaclSDK.Header - - incompleteObjectHeaders bool -} - -func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) { - cfg := &cfg{ - storage: os, - cnr: cnrID, - msg: xhs, - } - - for i := range opts { - opts[i](cfg) - } - - if cfg.msg == nil { - return nil, errors.New("message is not provided") - } - - var res headerSource - - err := cfg.readObjectHeaders(&res) - if err != nil { - return nil, err - } - - res.requestHeaders = cfg.msg.GetXHeaders() - - return res, nil -} - -func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) { - switch typ { - default: - return nil, true - case eaclSDK.HeaderFromRequest: - return h.requestHeaders, true - case eaclSDK.HeaderFromObject: - return h.objectHeaders, 
!h.incompleteObjectHeaders - } -} - -type xHeader session.XHeader - -func (x xHeader) Key() string { - return (*session.XHeader)(&x).GetKey() -} - -func (x xHeader) Value() string { - return (*session.XHeader)(&x).GetValue() -} - -var errMissingOID = errors.New("object ID is missing") - -func (h *cfg) readObjectHeaders(dst *headerSource) error { - switch m := h.msg.(type) { - default: - panic(fmt.Sprintf("unexpected message type %T", h.msg)) - case requestXHeaderSource: - return h.readObjectHeadersFromRequestXHeaderSource(m, dst) - case responseXHeaderSource: - return h.readObjectHeadersResponseXHeaderSource(m, dst) - } -} - -func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error { - switch req := m.req.(type) { - case - *objectV2.GetRequest, - *objectV2.HeadRequest: - if h.obj == nil { - return errMissingOID - } - - objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objHeaders - dst.incompleteObjectHeaders = !completed - case - *objectV2.GetRangeRequest, - *objectV2.GetRangeHashRequest, - *objectV2.DeleteRequest: - if h.obj == nil { - return errMissingOID - } - - dst.objectHeaders = addressHeaders(h.cnr, h.obj) - case *objectV2.PutRequest: - if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.PutSingleRequest: - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj) - case *objectV2.SearchRequest: - cnrV2 := req.GetBody().GetContainerID() - var cnr cid.ID - - if cnrV2 != nil { - if err := cnr.ReadFromV2(*cnrV2); err != nil { - return fmt.Errorf("can't parse container ID: %w", err) - } - } - - dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)} - } - return nil -} - -func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error { - switch resp := m.resp.(type) { - default: - objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objectHeaders - dst.incompleteObjectHeaders = !completed - case *objectV2.GetResponse: - if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.HeadResponse: - oV2 := new(objectV2.Object) - - var hdr *objectV2.Header - - switch v := resp.GetBody().GetHeaderPart().(type) { - case *objectV2.ShortHeader: - hdr = new(objectV2.Header) - - var idV2 refsV2.ContainerID - h.cnr.WriteToV2(&idV2) - - hdr.SetContainerID(&idV2) - hdr.SetVersion(v.GetVersion()) - hdr.SetCreationEpoch(v.GetCreationEpoch()) - hdr.SetOwnerID(v.GetOwnerID()) - hdr.SetObjectType(v.GetObjectType()) - hdr.SetPayloadLength(v.GetPayloadLength()) - case *objectV2.HeaderWithSignature: - hdr = v.GetHeader() - } - - oV2.SetHeader(hdr) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - return nil -} - -func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) { - if idObj != nil { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*idObj) - - obj, err := h.storage.Head(context.TODO(), addr) - if err == nil { - return headersFromObject(obj, cnr, idObj), true - } - } - - return addressHeaders(cnr, 
idObj), false -} - -func cidHeader(idCnr cid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectContainerID, - v: idCnr.EncodeToString(), - } -} - -func oidHeader(obj oid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectID, - v: obj.EncodeToString(), - } -} - -func ownerIDHeader(ownerID user.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectOwnerID, - v: ownerID.EncodeToString(), - } -} - -func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - hh := make([]eaclSDK.Header, 0, 2) - hh = append(hh, cidHeader(cnr)) - - if oid != nil { - hh = append(hh, oidHeader(*oid)) - } - - return hh -} diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go deleted file mode 100644 index 92570a3c5..000000000 --- a/pkg/services/object/acl/eacl/v2/object.go +++ /dev/null @@ -1,92 +0,0 @@ -package v2 - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type sysObjHdr struct { - k, v string -} - -func (s sysObjHdr) Key() string { - return s.k -} - -func (s sysObjHdr) Value() string { - return s.v -} - -func u64Value(v uint64) string { - return strconv.FormatUint(v, 10) -} - -func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - var count int - for obj := obj; obj != nil; obj = obj.Parent() { - count += 9 + len(obj.Attributes()) - } - - res := make([]eaclSDK.Header, 0, count) - for ; obj != nil; obj = obj.Parent() { - res = append(res, - cidHeader(cnr), - // creation epoch - sysObjHdr{ - k: acl.FilterObjectCreationEpoch, - v: u64Value(obj.CreationEpoch()), - }, - // payload size - sysObjHdr{ - k: acl.FilterObjectPayloadLength, - v: u64Value(obj.PayloadSize()), - }, - // object version - sysObjHdr{ - k: acl.FilterObjectVersion, - v: obj.Version().String(), - }, - // object type - sysObjHdr{ - k: acl.FilterObjectType, - v: obj.Type().String(), - }, - ) - - if oid != nil { - res = append(res, oidHeader(*oid)) - } - - if idOwner := obj.OwnerID(); !idOwner.IsEmpty() { - res = append(res, ownerIDHeader(idOwner)) - } - - cs, ok := obj.PayloadChecksum() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectPayloadHash, - v: cs.String(), - }) - } - - cs, ok = obj.PayloadHomomorphicHash() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectHomomorphicHash, - v: cs.String(), - }) - } - - attrs := obj.Attributes() - for i := range attrs { - res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface - } - } - - return res -} diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go deleted file mode 100644 index d91a21c75..000000000 --- a/pkg/services/object/acl/eacl/v2/opts.go +++ /dev/null @@ -1,11 +0,0 @@ -package v2 - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func WithOID(v *oid.ID) Option { - return func(c *cfg) { - c.obj = v - } -} diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go deleted file mode 100644 index ce380c117..000000000 --- a/pkg/services/object/acl/eacl/v2/xheader.go +++ /dev/null @@ -1,69 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - eaclSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" -) - -type XHeaderSource interface { - GetXHeaders() []eaclSDK.Header -} - -type requestXHeaderSource struct { - req Request -} - -func NewRequestXHeaderSource(req Request) XHeaderSource { - return requestXHeaderSource{req: req} -} - -type responseXHeaderSource struct { - resp Response - - req Request -} - -func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource { - return responseXHeaderSource{resp: resp, req: req} -} - -func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - ln += len(meta.GetXHeaders()) - } - - res := make([]eaclSDK.Header, 0, ln) - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - for i := range x { - res = append(res, (xHeader)(x[i])) - } - } - - return res -} - -func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - xHdrs := make([][]session.XHeader, 0) - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - - ln += len(x) - - xHdrs = append(xHdrs, x) - } - - res := make([]eaclSDK.Header, 0, ln) - - for i := range xHdrs { - for j := range xHdrs[i] { - res = append(res, xHeader(xHdrs[i][j])) - } - } - - return res -} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go deleted file mode 100644 index cd2de174a..000000000 --- a/pkg/services/object/acl/v2/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -package v2 - -import ( - "fmt" -) - -const invalidRequestMessage = "malformed request" - -func malformedRequestError(reason string) error { - return fmt.Errorf("%s: %s", invalidRequestMessage, reason) -} - -var ( - errEmptyBody = malformedRequestError("empty body") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go deleted file mode 100644 index 15fcce884..000000000 --- a/pkg/services/object/acl/v2/opts.go +++ /dev/null @@ -1,12 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// WithLogger returns option to set logger. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go deleted file mode 100644 index 8bd34ccb3..000000000 --- a/pkg/services/object/acl/v2/request.go +++ /dev/null @@ -1,152 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "fmt" - - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// RequestInfo groups parsed version-independent (from SDK library) -// request information and raw API request. 
-type RequestInfo struct { - basicACL acl.Basic - requestRole acl.Role - operation acl.Op // put, get, head, etc. - cnrOwner user.ID // container owner - - // cnrNamespace defined to which namespace a container is belonged. - cnrNamespace string - - idCnr cid.ID - - // optional for some request - // e.g. Put, Search - obj *oid.ID - - senderKey []byte - - bearer *bearer.Token // bearer token of request - - srcRequest any -} - -func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) { - r.basicACL = basicACL -} - -func (r *RequestInfo) SetRequestRole(requestRole acl.Role) { - r.requestRole = requestRole -} - -func (r *RequestInfo) SetSenderKey(senderKey []byte) { - r.senderKey = senderKey -} - -// Request returns raw API request. -func (r RequestInfo) Request() any { - return r.srcRequest -} - -// ContainerOwner returns owner if the container. -func (r RequestInfo) ContainerOwner() user.ID { - return r.cnrOwner -} - -func (r RequestInfo) ContainerNamespace() string { - return r.cnrNamespace -} - -// ObjectID return object ID. -func (r RequestInfo) ObjectID() *oid.ID { - return r.obj -} - -// ContainerID return container ID. -func (r RequestInfo) ContainerID() cid.ID { - return r.idCnr -} - -// CleanBearer forces cleaning bearer token information. -func (r *RequestInfo) CleanBearer() { - r.bearer = nil -} - -// Bearer returns bearer token of the request. -func (r RequestInfo) Bearer() *bearer.Token { - return r.bearer -} - -// BasicACL returns basic ACL of the container. -func (r RequestInfo) BasicACL() acl.Basic { - return r.basicACL -} - -// SenderKey returns public key of the request's sender. -func (r RequestInfo) SenderKey() []byte { - return r.senderKey -} - -// Operation returns request's operation. -func (r RequestInfo) Operation() acl.Op { - return r.operation -} - -// RequestRole returns request sender's role. -func (r RequestInfo) RequestRole() acl.Role { - return r.requestRole -} - -// MetaWithToken groups session and bearer tokens, -// verification header and raw API request. -type MetaWithToken struct { - vheader *sessionV2.RequestVerificationHeader - token *sessionSDK.Object - bearer *bearer.Token - src any -} - -// RequestOwner returns ownerID and its public key -// according to internal meta information. 
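// (Sketch: the resolution implemented below follows a fixed three-way
// precedence; field names as in MetaWithToken above.)
//
//	switch {
//	case r.bearer != nil && r.bearer.Impersonate():
//		// 1. impersonation bearer token: trust the bearer's signing key
//	case r.token != nil:
//		// 2. session token: trust its signature-verified issuer
//	default:
//		// 3. fall back to the original body signature of the request
//	}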
-func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { - if r.vheader == nil { - return nil, nil, errEmptyVerificationHeader - } - - if r.bearer != nil && r.bearer.Impersonate() { - return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) - } - - // if session token is presented, use it as truth source - if r.token != nil { - // verify signature of session token - return ownerFromToken(r.token) - } - - // otherwise get original body signature - bodySignature := originalBodySignature(r.vheader) - if bodySignature == nil { - return nil, nil, errEmptyBodySig - } - - return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) -} - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go deleted file mode 100644 index db0f13ee7..000000000 --- a/pkg/services/object/acl/v2/service.go +++ /dev/null @@ -1,779 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.uber.org/zap" -) - -// Service checks basic ACL rules. -type Service struct { - *cfg - - c objectCore.SenderClassifier -} - -type putStreamBasicChecker struct { - source *Service - next object.PutObjectStream -} - -type patchStreamBasicChecker struct { - source *Service - next object.PatchObjectStream - nonFirstSend bool -} - -// Option represents Service constructor option. -type Option func(*cfg) - -type cfg struct { - log *logger.Logger - - containers container.Source - - irFetcher InnerRingFetcher - - nm netmap.Source - - next object.ServiceServer -} - -// New is a constructor for object ACL checking service. -func New(next object.ServiceServer, - nm netmap.Source, - irf InnerRingFetcher, - cs container.Source, - opts ...Option, -) Service { - cfg := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - next: next, - nm: nm, - irFetcher: irf, - containers: cs, - } - - for i := range opts { - opts[i](cfg) - } - - return Service{ - cfg: cfg, - c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log), - } -} - -// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. 
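// (Sketch of the consuming side, assuming object.RequestContextKey and
// *object.RequestContext as used in this file; the handler name is
// hypothetical.)
//
//	func nextHandler(ctx context.Context) error {
//		reqCtx, ok := ctx.Value(object.RequestContextKey).(*object.RequestContext)
//		if !ok {
//			return errors.New("request context is not set")
//		}
//		_ = reqCtx // namespace, container owner, sender key, role, bearer token
//		return nil
//	}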
-type wrappedGetObjectStream struct { - object.GetObjectStream - - requestInfo RequestInfo -} - -func (w *wrappedGetObjectStream) Context() context.Context { - return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream { - return &wrappedGetObjectStream{ - GetObjectStream: getObjectStream, - requestInfo: reqInfo, - } -} - -// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedRangeStream struct { - object.GetObjectRangeStream - - requestInfo RequestInfo -} - -func (w *wrappedRangeStream) Context() context.Context { - return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream { - return &wrappedRangeStream{ - GetObjectRangeStream: rangeStream, - requestInfo: reqInfo, - } -} - -// wrappedSearchStream propagates RequestContext into SearchStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedSearchStream struct { - object.SearchStream - - requestInfo RequestInfo -} - -func (w *wrappedSearchStream) Context() context.Context { - return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream { - return &wrappedSearchStream{ - SearchStream: searchStream, - requestInfo: reqInfo, - } -} - -// Get implements ServiceServer interface, makes ACL checks and calls -// next Get method in the ServiceServer pipeline. 
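// (Sketch with assumed variable names: how this decorator was wired into the
// object service pipeline via the constructor and option defined above.)
//
//	aclSvc := New(next, netmapSource, irFetcher, containerSource,
//		WithLogger(log))
//	// Get/Put/Head/... now run the basic ACL checks first,
//	// then delegate to the wrapped handler.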
-func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet) - if err != nil { - return err - } - - reqInfo.obj = obj - - return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) -} - -func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) { - streamer, err := b.next.Put(ctx) - - return putStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) { - streamer, err := b.next.Patch(ctx) - - return &patchStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Head( - ctx context.Context, - request *objectV2.HeadRequest, -) (*objectV2.HeadResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.Head(requestContext(ctx, reqInfo), request) -} - -func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error { - id, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, id, nil) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch) - if err != nil { - return err - } - - return b.next.Search(request, newWrappedSearchStream(stream, reqInfo)) -} - -func (b Service) Delete( - ctx context.Context, - request *objectV2.DeleteRequest, -) (*objectV2.DeleteResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err 
!= nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.Delete(requestContext(ctx, reqInfo), request) -} - -func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange) - if err != nil { - return err - } - - reqInfo.obj = obj - - return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo)) -} - -func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { - return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{ - Namespace: reqInfo.ContainerNamespace(), - ContainerOwner: reqInfo.ContainerOwner(), - SenderKey: reqInfo.SenderKey(), - Role: reqInfo.RequestRole(), - BearerToken: reqInfo.Bearer(), - }) -} - -func (b Service) GetRangeHash( - ctx context.Context, - request *objectV2.GetRangeHashRequest, -) (*objectV2.GetRangeHashResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) -} - -func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid object owner: %w", err) - } - - obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID()) - if err != nil { - return nil, err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return nil, err - } - - bTok, err := 
originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.PutSingle(requestContext(ctx, reqInfo), request) -} - -func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - part := body.GetObjectPart() - if part, ok := part.(*objectV2.PutObjectPartInit); ok { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - idV2 := part.GetHeader().GetOwnerID() - if idV2 == nil { - return errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return fmt.Errorf("invalid object owner: %w", err) - } - - objV2 := part.GetObjectID() - var obj *oid.ID - - if objV2 != nil { - obj = new(oid.ID) - - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) - if err != nil { - return err - } - - reqInfo.obj = obj - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { - var sTok *sessionSDK.Object - - if tokV2 != nil { - sTok = new(sessionSDK.Object) - - err := sTok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { - // if session relates to object's removal, we don't check - // relation of the tombstone to the session here since user - // can't predict tomb's ID. 
- err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} - -func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - if !p.nonFirstSend { - p.nonFirstSend = true - - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - objV2 := request.GetBody().GetAddress().GetObjectID() - if objV2 == nil { - return errors.New("missing oid") - } - obj := new(oid.ID) - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr) - if err != nil { - return err - } - - reqInfo.obj = obj - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { - cnr, err := b.containers.Get(idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch() - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - - if !assertVerb(*req.token, op) { - return info, errInvalidVerb - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.operation = op - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} - -// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert. 
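// (Sketch of the delta between the two variants; the rest of the
// classification flow is identical.)
//
//	// findRequestInfo only:
//	if !assertVerb(*req.token, op) {
//		return info, errInvalidVerb
//	}
//
// The Patch stream uses this assert-free variant, seemingly because a patch
// request cannot be pinned to a single acl.Op up front.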
-func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { - cnr, err := b.containers.Get(idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch() - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go deleted file mode 100644 index b03261b90..000000000 --- a/pkg/services/object/acl/v2/types.go +++ /dev/null @@ -1,9 +0,0 @@ -package v2 - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. - InnerRingKeys() ([][]byte, error) -} diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go deleted file mode 100644 index 4b19cecfe..000000000 --- a/pkg/services/object/acl/v2/util_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" - aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - "github.com/stretchr/testify/require" -) - -func TestOriginalTokens(t *testing.T) { - sToken := sessiontest.ObjectSigned() - bToken := bearertest.Token() - - pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, bToken.Sign(*pk)) - - var bTokenV2 acl.BearerToken - bToken.WriteToV2(&bTokenV2) - // This line is needed because SDK uses some custom format for - // reserved filters, so `cid.ID` is not converted to string immediately. 
- require.NoError(t, bToken.ReadFromV2(bTokenV2)) - - var sTokenV2 session.Token - sToken.WriteToV2(&sTokenV2) - - for i := range 10 { - metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) - res, err := originalSessionToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, sToken, res, i) - - bTok, err := originalBearerToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, &bToken, bTok, i) - } -} - -func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader { - metaHeader := new(session.RequestMetaHeader) - metaHeader.SetBearerToken(b) - metaHeader.SetSessionToken(s) - - for range depth { - link := metaHeader - metaHeader = new(session.RequestMetaHeader) - metaHeader.SetOrigin(link) - } - - return metaHeader -} - -func TestIsVerbCompatible(t *testing.T) { - // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28 - table := map[aclsdk.Op][]sessionSDK.ObjectVerb{ - aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet}, - aclsdk.OpObjectHead: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - }, - aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - } - - var tok sessionSDK.Object - - for op, list := range table { - for _, verb := range verbs { - var contains bool - for _, v := range list { - if v == verb { - contains = true - break - } - } - - tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index 4a3b5ba5e..bb6067a37 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -64,6 +64,9 @@ type Prm struct { // An encoded container's owner user ID. ContainerOwner user.ID + // Attributes defined for the container. + ContainerAttributes map[string]string + // The request's bearer token. 
It is used in order to check APE overrides with the token. BearerToken *bearer.Token @@ -76,9 +79,10 @@ var errMissingOID = errors.New("object ID is not set") // CheckAPE prepares an APE-request and checks if it is permitted by policies. func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { // APE check is ignored for some inter-node requests. - if prm.Role == nativeschema.PropertyValueContainerRoleContainer { + switch prm.Role { + case nativeschema.PropertyValueContainerRoleContainer: return nil - } else if prm.Role == nativeschema.PropertyValueContainerRoleIR { + case nativeschema.PropertyValueContainerRoleIR: switch prm.Method { case nativeschema.MethodGetObject, nativeschema.MethodHeadObject, @@ -99,7 +103,7 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { return err } - return c.checkerCore.CheckAPE(checkercore.CheckPrm{ + return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{ Request: r, PublicKey: pub, Namespace: prm.Namespace, diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index e03b5750c..97eb2b2d7 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { return pk.GetScriptHash() } -func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -619,21 +619,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch() (uint64, error) { +func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } @@ -641,14 +641,14 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } diff --git 
a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go
index 1b2024ed5..82e660a7f 100644
--- a/pkg/services/object/ape/errors.go
+++ b/pkg/services/object/ape/errors.go
@@ -1,10 +1,34 @@
 package ape
 
 import (
+	"errors"
+
+	checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 )
 
+var (
+	errMissingContainerID      = malformedRequestError("missing container ID")
+	errEmptyVerificationHeader = malformedRequestError("empty verification header")
+	errEmptyBodySig            = malformedRequestError("empty body signature")
+	errInvalidSessionSig       = malformedRequestError("invalid session token signature")
+	errInvalidSessionOwner     = malformedRequestError("invalid session token owner")
+	errInvalidVerb             = malformedRequestError("session token verb is invalid")
+)
+
+func malformedRequestError(reason string) error {
+	invalidArgErr := &apistatus.InvalidArgument{}
+	invalidArgErr.SetMessage(reason)
+	return invalidArgErr
+}
+
 func toStatusErr(err error) error {
+	var chRouterErr *checkercore.ChainRouterError
+	if !errors.As(err, &chRouterErr) {
+		errServerInternal := &apistatus.ServerInternal{}
+		apistatus.WriteInternalServerErr(errServerInternal, err)
+		return errServerInternal
+	}
 	errAccessDenied := &apistatus.ObjectAccessDenied{}
 	errAccessDenied.WriteReason("ape denied request: " + err.Error())
 	return errAccessDenied
diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go
new file mode 100644
index 000000000..102985aa6
--- /dev/null
+++ b/pkg/services/object/ape/metadata.go
@@ -0,0 +1,179 @@
+package ape
+
+import (
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+type Metadata struct {
+	Container          cid.ID
+	Object             *oid.ID
+	MetaHeader         *session.RequestMetaHeader
+	VerificationHeader *session.RequestVerificationHeader
+	SessionToken       *sessionSDK.Object
+	BearerToken        *bearer.Token
+}
+
+func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
+	if m.VerificationHeader == nil {
+		return nil, nil, errEmptyVerificationHeader
+	}
+
+	if m.BearerToken != nil && m.BearerToken.Impersonate() {
+		return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
+	}
+
+	// if a session token is present, use it as the source of truth
+	if m.SessionToken != nil {
+		// verify signature of session token
+		return ownerFromToken(m.SessionToken)
+	}
+
+	// otherwise get the original body signature
+	bodySignature := originalBodySignature(m.VerificationHeader)
+	if bodySignature == nil {
+		return nil, nil, errEmptyBodySig
+	}
+
+	return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
+}
+
+// RequestInfo contains request information extracted from request metadata.
+type RequestInfo struct {
+	// Role defines under which role this request is executed.
+	// It must be one of the role constants defined in the native schema.
+	Role string
+
+	ContainerOwner user.ID
+
+	ContainerAttributes map[string]string
+
+	// Namespace defines the namespace to which the container belongs.
+	Namespace string
+
+	// Hex-encoded sender key.
+	SenderKey string
+}
+
+type RequestInfoExtractor interface {
+	GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
+}
+
+type extractor struct {
+	containers container.Source
+
+	nm netmap.Source
+
+	classifier objectCore.SenderClassifier
+}
+
+func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
+	return &extractor{
+		containers: containers,
+		nm:         nm,
+		classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
+	}
+}
+
+func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
+	currentEpoch, err := e.nm.Epoch(ctx)
+	if err != nil {
+		return errors.New("can't fetch current epoch")
+	}
+	if sessionToken.ExpiredAt(currentEpoch) {
+		return new(apistatus.SessionTokenExpired)
+	}
+	if sessionToken.InvalidAt(currentEpoch) {
+		return fmt.Errorf("malformed request: token is invalid at epoch %d", currentEpoch)
+	}
+	if !assertVerb(*sessionToken, method) {
+		return errInvalidVerb
+	}
+	return nil
+}
+
+func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
+	cnr, err := e.containers.Get(ctx, m.Container)
+	if err != nil {
+		return ri, err
+	}
+
+	if m.SessionToken != nil {
+		if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
+			return ri, err
+		}
+	}
+
+	ownerID, ownerKey, err := m.RequestOwner()
+	if err != nil {
+		return ri, err
+	}
+	res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
+	if err != nil {
+		return ri, err
+	}
+
+	ri.Role = nativeSchemaRole(res.Role)
+	ri.ContainerOwner = cnr.Value.Owner()
+
+	ri.ContainerAttributes = map[string]string{}
+	for key, val := range cnr.Value.Attributes() {
+		ri.ContainerAttributes[key] = val
+	}
+
+	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+	if hasNamespace {
+		ri.Namespace = cnrNamespace
+	}
+
+	// it is assumed that at this moment the key is valid,
+	// otherwise the request would not have passed validation
+	ri.SenderKey = hex.EncodeToString(res.Key)
+
+	return ri, nil
+}
+
+func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
+	var sTok *sessionSDK.Object
+
+	if tokV2 != nil {
+		sTok = new(sessionSDK.Object)
+
+		err := sTok.ReadFromV2(*tokV2)
+		if err != nil {
+			return nil, fmt.Errorf("invalid session token: %w", err)
+		}
+
+		if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+			// if the session relates to an object's removal, we don't check
+			// the relation of the tombstone to the session here since the user
+			// can't predict the tombstone's ID.
+ err = assertSessionRelation(*sTok, cnr, nil) + } else { + err = assertSessionRelation(*sTok, cnr, obj) + } + + if err != nil { + return nil, err + } + } + + return sTok, nil +} diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/ape/metadata_test.go similarity index 83% rename from pkg/services/object/acl/v2/request_test.go rename to pkg/services/object/ape/metadata_test.go index 618af3469..fd919008f 100644 --- a/pkg/services/object/acl/v2/request_test.go +++ b/pkg/services/object/ape/metadata_test.go @@ -1,4 +1,4 @@ -package v2 +package ape import ( "testing" @@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) { vh.SetBodySignature(&userSignature) t.Run("empty verification header", func(t *testing.T) { - req := MetaWithToken{} + req := Metadata{} checkOwner(t, req, nil, errEmptyVerificationHeader) }) t.Run("empty verification header signature", func(t *testing.T) { - req := MetaWithToken{ - vheader: new(sessionV2.RequestVerificationHeader), + req := Metadata{ + VerificationHeader: new(sessionV2.RequestVerificationHeader), } checkOwner(t, req, nil, errEmptyBodySig) }) t.Run("no tokens", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, + req := Metadata{ + VerificationHeader: vh, } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, false), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, false), } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) { pk, err := keys.NewPrivateKey() require.NoError(t, err) - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), - token: newSession(t, pk), + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), + SessionToken: newSession(t, pk), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) t.Run("with session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - token: newSession(t, containerOwner), + req := Metadata{ + VerificationHeader: vh, + SessionToken: newSession(t, containerOwner), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) { var tok sessionSDK.Object require.NoError(t, tok.ReadFromV2(tokV2)) - req := MetaWithToken{ - vheader: vh, - token: &tok, + req := Metadata{ + VerificationHeader: vh, + SessionToken: &tok, } checkOwner(t, req, nil, errInvalidSessionOwner) }) @@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool return &tok } -func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) { +func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) { _, actual, err := req.RequestOwner() if expectedErr != nil { require.ErrorIs(t, err, expectedErr) diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index cb9bbf1b8..39dd7f476 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -57,11 +57,16 @@ 
func resourceName(cid cid.ID, oid *oid.ID, namespace string) string {
 }

 // objectProperties collects object properties from address parameters and a header if it is passed.
-func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string {
+func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string {
 	objectProps := map[string]string{
 		nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(),
 	}

+	for attrName, attrValue := range cnrAttrs {
+		prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)
+		objectProps[prop] = attrValue
+	}
+
 	objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString()

 	if oid != nil {
@@ -140,7 +145,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
 		reqProps[xheadKey] = xhead.GetValue()
 	}

-	reqProps, err = c.fillWithUserClaimTags(reqProps, prm)
+	reqProps, err = c.fillWithUserClaimTags(ctx, reqProps, prm)
 	if err != nil {
 		return defaultRequest, err
 	}
@@ -155,7 +160,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
 		prm.Method,
 		aperequest.NewResource(
 			resourceName(prm.Container, prm.Object, prm.Namespace),
-			objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
+			objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header),
 		),
 		reqProps,
 	), nil
@@ -177,7 +182,7 @@ func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, heade
 		return nil, fmt.Errorf("EC parent object ID format error: %w", err)
 	}
 	// only container nodes have access to collect the parent object
-	contNode, err := c.currentNodeIsContainerNode(prm.Container)
+	contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container)
 	if err != nil {
 		return nil, fmt.Errorf("check container node status: %w", err)
 	}
@@ -200,13 +205,13 @@ func isLogicalError(err error) bool {
 	return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound)
 }

-func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
-	cnr, err := c.cnrSource.Get(cnrID)
+func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) {
+	cnr, err := c.cnrSource.Get(ctx, cnrID)
 	if err != nil {
 		return false, err
 	}

-	nm, err := netmap.GetLatestNetworkMap(c.nm)
+	nm, err := netmap.GetLatestNetworkMap(ctx, c.nm)
 	if err != nil {
 		return false, err
 	}
@@ -220,7 +225,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
 		return true, nil
 	}

-	nm, err = netmap.GetPreviousNetworkMap(c.nm)
+	nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm)
 	if err != nil {
 		return false, err
 	}
@@ -229,7 +234,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) {
 	return true, nil
 }

 // fillWithUserClaimTags fills APE request properties with user claim tags, fetching them from the frostfsid contract by the actor's public key.
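To make the attribute loop in objectProperties concrete (the attribute pair is hypothetical; the exact key layout is whatever the nativeschema format constant defines), one container attribute becomes one extra object property:

    cnrAttrs := map[string]string{"Name": "eggplant"} // hypothetical container attribute
    objectProps := map[string]string{}
    for attrName, attrValue := range cnrAttrs {
        // APE rules can now match objects by the attributes of their container.
        objectProps[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName)] = attrValue
    }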
-func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) { +func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } @@ -237,7 +242,7 @@ func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) if err != nil { return nil, err } - props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go index 787785b60..fcf7c4c40 100644 --- a/pkg/services/object/ape/request_test.go +++ b/pkg/services/object/ape/request_test.go @@ -7,6 +7,7 @@ import ( "testing" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" + cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -19,11 +20,20 @@ import ( ) const ( - testOwnerID = "FPPtmAi9TCX329" + testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y" incomingIP = "192.92.33.1" + + testSysAttrName = "unittest" + + testSysAttrZone = "eggplant" ) +var containerAttrs = map[string]string{ + cnrV2.SysAttributeName: testSysAttrName, + cnrV2.SysAttributeZone: testSysAttrZone, +} + func ctxWithPeerInfo() context.Context { return peer.NewContext(context.Background(), &peer.Peer{ Addr: &net.TCPAddr{ @@ -105,7 +115,7 @@ func TestObjectProperties(t *testing.T) { var testCnrOwner user.ID require.NoError(t, testCnrOwner.DecodeString(testOwnerID)) - props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader()) + props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader()) require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID]) require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID]) @@ -124,6 +134,8 @@ func TestObjectProperties(t *testing.T) { require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType]) require.Equal(t, test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash]) require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash]) + require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)]) + require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)]) for _, attr := range test.header.attributes { require.Equal(t, attr.val, props[attr.key]) @@ -245,6 +257,10 @@ func TestNewAPERequest(t *testing.T) { Role: role, SenderKey: senderKey, ContainerOwner: testCnrOwner, + ContainerAttributes: map[string]string{ + cnrV2.SysAttributeZone: testSysAttrZone, + cnrV2.SysAttributeName: testSysAttrName, + }, } headerSource := newHeaderProviderMock() @@ -277,7 +293,7 @@ func TestNewAPERequest(t *testing.T) { method, aperequest.NewResource( resourceName(cnr, obj, prm.Namespace), - objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header { + objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header 
{ if headerObjSDK != nil { return headerObjSDK.ToV2().GetHeader() } diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index c6d152e0f..5e04843f3 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -2,9 +2,6 @@ package ape import ( "context" - "encoding/hex" - "errors" - "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" @@ -12,19 +9,18 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" ) -var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") - type Service struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.ServiceServer } @@ -64,9 +60,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service { +func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { return &Service{ apeChecker: apeChecker, + extractor: extractor, next: next, } } @@ -76,15 +73,9 @@ type getStreamBasicChecker struct { apeChecker Checker - namespace string + metadata Metadata - senderKey []byte - - containerOwner user.ID - - role string - - bearerToken *bearer.Token + reqInfo RequestInfo } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { @@ -95,16 +86,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.namespace, - Container: cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodGetObject, - SenderKey: hex.EncodeToString(g.senderKey), - ContainerOwner: g.containerOwner, - Role: g.role, - BearerToken: g.bearerToken, - XHeaders: resp.GetMetaHeader().GetXHeaders(), + Namespace: g.reqInfo.Namespace, + Container: cnrID, + Object: objID, + Header: partInit.GetHeader(), + Method: nativeschema.MethodGetObject, + SenderKey: g.reqInfo.SenderKey, + ContainerOwner: g.reqInfo.ContainerOwner, + ContainerAttributes: g.reqInfo.ContainerAttributes, + Role: g.reqInfo.Role, + BearerToken: g.metadata.BearerToken, + XHeaders: resp.GetMetaHeader().GetXHeaders(), } if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil { @@ -114,64 +106,54 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { return g.GetObjectStream.Send(resp) } -func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) { - untyped := ctx.Value(objectSvc.RequestContextKey) - if untyped == nil { - return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey) - } - rc, ok := untyped.(*objectSvc.RequestContext) - if !ok { - return nil, errFailedToCastToRequestContext - } - return rc, nil -} - func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - reqCtx, err := requestContext(stream.Context()) + md, err := 
newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) + if err != nil { + return err } - return c.next.Get(request, &getStreamBasicChecker{ GetObjectStream: stream, apeChecker: c.apeChecker, - namespace: reqCtx.Namespace, - senderKey: reqCtx.SenderKey, - containerOwner: reqCtx.ContainerOwner, - role: nativeSchemaRole(reqCtx.Role), - bearerToken: reqCtx.BearerToken, + metadata: md, + reqInfo: reqInfo, }) } type putStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PutObjectStream } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodPutObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: partInit.GetHeader(), + Method: nativeschema.MethodPutObject, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -191,6 +173,7 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { return &putStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } @@ -198,6 +181,8 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { type patchStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PatchObjectStream nonFirstSend bool @@ -207,26 +192,26 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa if !p.nonFirstSend { p.nonFirstSend = true - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodPatchObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - BearerToken: 
reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodPatchObject, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -246,17 +231,17 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error return &patchStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject) if err != nil { return nil, err } @@ -270,7 +255,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj switch headerPart := resp.GetBody().GetHeaderPart().(type) { case *objectV2.ShortHeader: cidV2 := new(refs.ContainerID) - cnrID.WriteToV2(cidV2) + md.Container.WriteToV2(cidV2) header.SetContainerID(cidV2) header.SetVersion(headerPart.GetVersion()) header.SetCreationEpoch(headerPart.GetCreationEpoch()) @@ -286,16 +271,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Header: header, - Method: nativeschema.MethodHeadObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: header, + Method: nativeschema.MethodHeadObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -304,27 +290,25 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - var cnrID cid.ID - if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { - if err := cnrID.ReadFromV2(*cnrV2); err != nil { - return toStatusErr(err) - } - } - - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetContainerID(), nil) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject) + if err != nil { + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Method: nativeschema.MethodSearchObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - 
XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Method: nativeschema.MethodSearchObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -334,26 +318,26 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject) if err != nil { return nil, err } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodDeleteObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodDeleteObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -368,26 +352,26 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - reqCtx, err := requestContext(stream.Context()) + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) if err != nil { - return toStatusErr(err) + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodRangeObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodRangeObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -397,26 +381,26 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx context.Context, request 
*objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Method: nativeschema.MethodHashObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Method: nativeschema.MethodHashObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -431,27 +415,27 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, - Header: request.GetBody().GetObject().GetHeader(), - Method: nativeschema.MethodPutObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: request.GetBody().GetObject().GetHeader(), + Method: nativeschema.MethodPutObject, + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -461,18 +445,36 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ return c.next.PutSingle(ctx, request) } -func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return - } +type request interface { + GetMetaHeader() *session.RequestMetaHeader + GetVerificationHeader() *session.RequestVerificationHeader +} + +func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin } - if objV2 != nil { - objID = 
new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } + cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2) + if err != nil { + return + } + session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) + if err != nil { + return + } + bearer, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return + } + + md = Metadata{ + Container: cnrID, + Object: objID, + VerificationHeader: request.GetVerificationHeader(), + SessionToken: session, + BearerToken: bearer, } return } diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go index 46e55360d..97dbfa658 100644 --- a/pkg/services/object/ape/types.go +++ b/pkg/services/object/ape/types.go @@ -7,3 +7,11 @@ import "context" type Checker interface { CheckAPE(context.Context, Prm) error } + +// InnerRingFetcher is an interface that must provide +// Inner Ring information. +type InnerRingFetcher interface { + // InnerRingKeys must return list of public keys of + // the actual inner ring. + InnerRingKeys(ctx context.Context) ([][]byte, error) +} diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/ape/util.go similarity index 58% rename from pkg/services/object/acl/v2/util.go rename to pkg/services/object/ape/util.go index e02f70771..5cd2caa50 100644 --- a/pkg/services/object/acl/v2/util.go +++ b/pkg/services/object/ape/util.go @@ -1,4 +1,4 @@ -package v2 +package ape import ( "crypto/ecdsa" @@ -6,57 +6,34 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) -var errMissingContainerID = errors.New("missing container ID") - -func getContainerIDFromRequest(req any) (cid.ID, error) { - var idV2 *refsV2.ContainerID - var id cid.ID - - switch v := req.(type) { - case *objectV2.GetRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutRequest: - part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit) - if !ok { - return cid.ID{}, errors.New("can't get container ID in chunk") +func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { + if cidV2 != nil { + if err = cnrID.ReadFromV2(*cidV2); err != nil { + return } - - idV2 = part.GetHeader().GetContainerID() - case *objectV2.HeadRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.SearchRequest: - idV2 = v.GetBody().GetContainerID() - case *objectV2.DeleteRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeHashRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutSingleRequest: - idV2 = v.GetBody().GetObject().GetHeader().GetContainerID() - case *objectV2.PatchRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - default: - return cid.ID{}, errors.New("unknown request type") + } else { + err = 
errMissingContainerID + return } - if idV2 == nil { - return cid.ID{}, errMissingContainerID + if objV2 != nil { + objID = new(oid.ID) + if err = objID.ReadFromV2(*objV2); err != nil { + return + } } - - return id, id.ReadFromV2(*idV2) + return } // originalBearerToken goes down to original request meta header and fetches @@ -75,50 +52,6 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er return &tok, tok.ReadFromV2(*tokV2) } -// originalSessionToken goes down to original request meta header and fetches -// session token from there. -func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) { - for header.GetOrigin() != nil { - header = header.GetOrigin() - } - - tokV2 := header.GetSessionToken() - if tokV2 == nil { - return nil, nil - } - - var tok sessionSDK.Object - - err := tok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - return &tok, nil -} - -// getObjectIDFromRequestBody decodes oid.ID from the common interface of the -// object reference's holders. Returns an error if object ID is missing in the request. -func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) { - idV2 := body.GetAddress().GetObjectID() - return getObjectIDFromRefObjectID(idV2) -} - -func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) { - if idV2 == nil { - return nil, errors.New("missing object ID") - } - - var id oid.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, err - } - - return &id, nil -} - func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { // 1. First check signature of session token. if !token.VerifySignature() { @@ -172,16 +105,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -// assertVerb checks that token verb corresponds to op. -func assertVerb(tok sessionSDK.Object, op acl.Op) bool { - switch op { - case acl.OpObjectPut: +// assertVerb checks that token verb corresponds to the method. 
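As a quick illustration of the method-to-verb mapping below (mirroring TestIsVerbCompatible later in this patch and using the same import aliases as this file), a token bound to the Get verb passes only for the methods that list it:

    var tok sessionSDK.Object
    tok.ForVerb(sessionSDK.VerbObjectGet)

    _ = assertVerb(tok, nativeschema.MethodGetObject) // true
    _ = assertVerb(tok, nativeschema.MethodPutObject) // false: Put accepts only Put, Delete or Patch verbs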
+func assertVerb(tok sessionSDK.Object, method string) bool { + switch method { + case nativeschema.MethodPutObject: return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) - case acl.OpObjectDelete: + case nativeschema.MethodDeleteObject: return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case acl.OpObjectGet: + case nativeschema.MethodGetObject: return tok.AssertVerb(sessionSDK.VerbObjectGet) - case acl.OpObjectHead: + case nativeschema.MethodHeadObject: return tok.AssertVerb( sessionSDK.VerbObjectHead, sessionSDK.VerbObjectGet, @@ -190,14 +123,15 @@ func assertVerb(tok sessionSDK.Object, op acl.Op) bool { sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch, ) - case acl.OpObjectSearch: + case nativeschema.MethodSearchObject: return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case acl.OpObjectRange: + case nativeschema.MethodRangeObject: return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) - case acl.OpObjectHash: + case nativeschema.MethodHashObject: return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) + case nativeschema.MethodPatchObject: + return tok.AssertVerb(sessionSDK.VerbObjectPatch) } - return false } @@ -221,3 +155,15 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error return nil } + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid signature key: %w", err) + } + + var idSender user.ID + user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) + + return &idSender, key, nil +} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go new file mode 100644 index 000000000..916bce427 --- /dev/null +++ b/pkg/services/object/ape/util_test.go @@ -0,0 +1,84 @@ +package ape + +import ( + "slices" + "testing" + + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/stretchr/testify/require" +) + +func TestIsVerbCompatible(t *testing.T) { + table := map[string][]sessionSDK.ObjectVerb{ + nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, + nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, + nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, + nativeschema.MethodHeadObject: { + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectPatch, + }, + nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch}, + nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, + nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, + nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, + } + + verbs := []sessionSDK.ObjectVerb{ + sessionSDK.VerbObjectPut, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectSearch, + sessionSDK.VerbObjectPatch, + } + + var tok sessionSDK.Object + + for op, list 
:= range table { + for _, verb := range verbs { + contains := slices.Contains(list, verb) + + tok.ForVerb(verb) + + require.Equal(t, contains, assertVerb(tok, op), + "%v in token, %s executing", verb, op) + } + } +} + +func TestAssertSessionRelation(t *testing.T) { + var tok sessionSDK.Object + cnr := cidtest.ID() + cnrOther := cidtest.ID() + obj := oidtest.ID() + objOther := oidtest.ID() + + // make sure ids differ, otherwise test won't work correctly + require.False(t, cnrOther.Equals(cnr)) + require.False(t, objOther.Equals(obj)) + + // bind session to the container (required) + tok.BindContainer(cnr) + + // test container-global session + require.NoError(t, assertSessionRelation(tok, cnr, nil)) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnrOther, nil)) + require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) + + // limit the session to the particular object + tok.LimitByObjects(obj) + + // test fixed object session (here obj arg must be non-nil everywhere) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnr, &objOther)) +} diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index dde9f8fc0..f8ee089fe 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -163,7 +163,7 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error if err != nil { a.failed = true } - if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) @@ -224,7 +224,7 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e if err != nil { a.failed = true } - if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index 9e0f49297..f2bd907db 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -1,6 +1,7 @@ package target import ( + "context" "errors" "fmt" @@ -13,20 +14,20 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) -func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { // prepare needed put parameters - if err := preparePrm(&prm); err != nil { + if err := preparePrm(ctx, &prm); err != nil { return nil, fmt.Errorf("could not prepare put parameters: %w", err) } if prm.Header.Signature() != nil { - return newUntrustedTarget(&prm) + return newUntrustedTarget(ctx, &prm) } - return newTrustedTarget(&prm) + return newTrustedTarget(ctx, &prm) } -func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize() +func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + 
maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) if maxPayloadSz == 0 { return nil, errors.New("could not obtain max object size parameter") } @@ -48,9 +49,9 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit }, nil } -func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { prm.Relay = nil // do not relay request without signature - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize() + maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) if maxPayloadSz == 0 { return nil, errors.New("could not obtain max object size parameter") } @@ -88,10 +89,8 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter if !ownerObj.Equals(ownerSession) { return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) } - } else { - if !ownerObj.Equals(sessionInfo.Owner) { - return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) - } + } else if !ownerObj.Equals(sessionInfo.Owner) { + return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) } if prm.SignRequestPrivateKey == nil { @@ -111,11 +110,11 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter }, nil } -func preparePrm(prm *objectwriter.Params) error { +func preparePrm(ctx context.Context, prm *objectwriter.Params) error { var err error // get latest network map - nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource) + nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource) if err != nil { return fmt.Errorf("could not get latest network map: %w", err) } @@ -126,7 +125,7 @@ func preparePrm(prm *objectwriter.Params) error { } // get container to store the object - cnrInfo, err := prm.Config.ContainerSource.Get(idCnr) + cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr) if err != nil { return fmt.Errorf("could not get container by ID: %w", err) } diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 3b68efab4..6593d3ca0 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { } func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { - traverser, err := placement.NewTraverser(n.Traversal.Opts...) + traverser, err := placement.NewTraverser(ctx, n.Opts...) if err != nil { return fmt.Errorf("could not create object placement traverser: %w", err) } @@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, } // perform additional container broadcast if needed - if n.Traversal.submitPrimaryPlacementFinish() { + if n.submitPrimaryPlacementFinish() { err := n.ForEachNode(ctx, f) if err != nil { n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) @@ -79,11 +79,11 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. 
continue } - workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey()) + isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey()) item := new(bool) wg.Add(1) - if err := workerPool.Submit(func() { + go func() { defer wg.Done() err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr}) @@ -95,17 +95,13 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. traverser.SubmitSuccess() *item = true - }); err != nil { - wg.Done() - svcutil.LogWorkerPoolError(ctx, n.cfg.Logger, "PUT", err) - return true - } + }() // Mark the container node as processed in order to exclude it // in subsequent container broadcast. Note that we don't // process this node during broadcast if primary placement // on it failed. - n.Traversal.submitProcessed(addr, item) + n.submitProcessed(addr, item) } wg.Wait() diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go index f7486eae7..fff58aca7 100644 --- a/pkg/services/object/common/writer/distributed.go +++ b/pkg/services/object/common/writer/distributed.go @@ -95,6 +95,10 @@ func (x errIncompletePut) Error() string { return commonMsg } +func (x errIncompletePut) Unwrap() error { + return x.singleErr +} + // WriteObject implements the transformer.ObjectWriter interface. func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { t.obj = obj diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index fdaa569da..26a53e315 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" @@ -84,7 +85,7 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error } func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) { - currentNodeIsContainerNode, err := e.currentNodeIsContainerNode() + currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx) if err != nil { return false, false, err } @@ -107,8 +108,8 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O return true, currentNodeIsContainerNode, nil } -func (e *ECWriter) currentNodeIsContainerNode() (bool, error) { - t, err := placement.NewTraverser(e.PlacementOpts...) +func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) { + t, err := placement.NewTraverser(ctx, e.PlacementOpts...) if err != nil { return false, err } @@ -127,7 +128,7 @@ func (e *ECWriter) currentNodeIsContainerNode() (bool, error) { } func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error { - t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) 
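A side note on the Unwrap method added to errIncompletePut above: callers of the distributed writer can now reach the underlying cause with the standard errors helpers instead of matching the generic message. A sketch (the error variable and the handling branch are illustrative):

    err := t.WriteObject(ctx, obj) // may fail with errIncompletePut wrapping the last node error
    var errRemoved *apistatus.ObjectAlreadyRemoved
    if errors.As(err, &errRemoved) {
        // react to the concrete status seen through the wrapper
    }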
if err != nil { return err } @@ -148,17 +149,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } - completed := make(chan interface{}) - if poolErr := e.Config.RemotePool.Submit(func() { - defer close(completed) - err = e.Relay(ctx, info, c) - }); poolErr != nil { - close(completed) - svcutil.LogWorkerPoolError(ctx, e.Config.Logger, "PUT", poolErr) - return poolErr - } - <-completed - + err = e.Relay(ctx, info, c) if err == nil { return nil } @@ -179,7 +170,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error return e.writePartLocal(ctx, obj) } - t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) if err != nil { return err } @@ -216,7 +207,7 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er } partsProcessed := make([]atomic.Bool, len(parts)) objID, _ := obj.ID() - t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) if err != nil { return err } @@ -274,6 +265,8 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx err := e.putECPartToNode(ctx, obj, node) if err == nil { return nil + } else if clientSDK.IsErrObjectAlreadyRemoved(err) { + return err } e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), @@ -340,21 +333,11 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n } func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { - var err error localTarget := LocalTarget{ Storage: e.Config.LocalStore, Container: e.Container, } - completed := make(chan interface{}) - if poolErr := e.Config.LocalPool.Submit(func() { - defer close(completed) - err = localTarget.WriteObject(ctx, obj, e.ObjectMeta) - }); poolErr != nil { - close(completed) - return poolErr - } - <-completed - return err + return localTarget.WriteObject(ctx, obj, e.ObjectMeta) } func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { @@ -368,15 +351,5 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n nodeInfo: clientNodeInfo, } - var err error - completed := make(chan interface{}) - if poolErr := e.Config.RemotePool.Submit(func() { - defer close(completed) - err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) - }); poolErr != nil { - close(completed) - return poolErr - } - <-completed - return err + return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) } diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index 8b2599e5f..d5eeddf21 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -7,6 +7,7 @@ import ( "crypto/sha256" "errors" "fmt" + "slices" "strconv" "testing" @@ -30,7 +31,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/panjf2000/ants/v2" "github.com/stretchr/testify/require" ) @@ -38,11 +38,10 @@ 
type testPlacementBuilder struct { vectors [][]netmap.NodeInfo } -func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( [][]netmap.NodeInfo, error, ) { - arr := make([]netmap.NodeInfo, len(p.vectors[0])) - copy(arr, p.vectors[0]) + arr := slices.Clone(p.vectors[0]) return [][]netmap.NodeInfo{arr}, nil } @@ -131,17 +130,13 @@ func TestECWriter(t *testing.T) { nodeKey, err := keys.NewPrivateKey() require.NoError(t, err) - pool, err := ants.NewPool(4, ants.WithNonblocking(true)) - require.NoError(t, err) - - log, err := logger.NewLogger(nil) + log, err := logger.NewLogger(logger.Prm{}) require.NoError(t, err) var n nmKeys ecw := ECWriter{ Config: &Config{ NetmapKeys: n, - RemotePool: pool, Logger: log, ClientConstructor: clientConstructor{vectors: ns}, KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil), diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go index 0e4c4d9c6..d3d2b41b4 100644 --- a/pkg/services/object/common/writer/writer.go +++ b/pkg/services/object/common/writer/writer.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -24,7 +23,7 @@ type MaxSizeSource interface { // of physically stored object in system. // // Must return 0 if value can not be obtained. 
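For reference, a minimal stub satisfying the now context-aware MaxSizeSource below (hypothetical; real implementations would use ctx for the underlying network-config lookup and return 0 on failure):

    type staticMaxSizeSource struct{ limit uint64 }

    // MaxObjectSize ignores the context and returns a fixed limit.
    func (s staticMaxSizeSource) MaxObjectSize(_ context.Context) uint64 { return s.limit }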
- MaxObjectSize() uint64 + MaxObjectSize(context.Context) uint64 } type ClientConstructor interface { @@ -32,7 +31,7 @@ type ClientConstructor interface { } type InnerRing interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type FormatValidatorConfig interface { @@ -52,8 +51,6 @@ type Config struct { NetmapSource netmap.Source - RemotePool, LocalPool util.WorkerPool - NetmapKeys netmap.AnnouncedKeys FormatValidator *object.FormatValidator @@ -69,12 +66,6 @@ type Config struct { type Option func(*Config) -func WithWorkerPools(remote, local util.WorkerPool) Option { - return func(c *Config) { - c.RemotePool, c.LocalPool = remote, local - } -} - func WithLogger(l *logger.Logger) Option { return func(c *Config) { c.Logger = l @@ -87,13 +78,6 @@ func WithVerifySessionTokenIssuer(v bool) Option { } } -func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) { - if c.NetmapKeys.IsLocalKey(pub) { - return c.LocalPool, true - } - return c.RemotePool, false -} - type Params struct { Config *Config diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go index 8aaff670c..57e33fde7 100644 --- a/pkg/services/object/delete/delete.go +++ b/pkg/services/object/delete/delete.go @@ -36,7 +36,7 @@ func (exec *execCtx) execute(ctx context.Context) error { exec.log.Debug(ctx, logs.ServingRequest) if err := exec.executeLocal(ctx); err != nil { - exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) return err } diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index 36a17bde2..a99ba3586 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) { for i := range members { for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body if members[i].Equals(incoming[j]) { - incoming = append(incoming[:j], incoming[j+1:]...) + incoming = slices.Delete(incoming, j, j+1) j-- } } diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index 867d3f4ef..1c4d7d585 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -92,6 +92,6 @@ func New(gs *getsvc.Service, // WithLogger returns option to specify Delete service's logger. 
func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "objectSDK.Delete service")) + c.log = l } } diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index e164627d2..e80132489 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque detachedExecutor.execute(ctx) - return detachedExecutor.statusError.err + return detachedExecutor.err } diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index ff3f90bf2..b24c9417b 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -2,6 +2,7 @@ package getsvc import ( "context" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -59,53 +60,24 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS if previousID == nil && len(childrenIDs) == 0 { return nil, objectSDK.NewSplitInfoError(a.splitInfo) } + if len(childrenIDs) > 0 { - if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil { - return nil, err + if a.rng != nil { + err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer) + } else { + err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer) } } else { - if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil { - return nil, err + if a.rng != nil { + err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer) + } else { + err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer) } } - return a.parentObject, nil -} - -func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - var sourceObjectIDs []oid.ID - sourceObjectID, ok := a.splitInfo.Link() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - sourceObjectID, ok = a.splitInfo.LastPart() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - if len(sourceObjectIDs) == 0 { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - for _, sourceObjectID = range sourceObjectIDs { - obj, err := a.getParent(ctx, sourceObjectID, writer) - if err == nil { - return obj, nil - } - } - return nil, objectSDK.NewSplitInfoError(a.splitInfo) -} - -func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) if err != nil { return nil, err } - parent := obj.Parent() - if parent == nil { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - if err := writer.WriteHeader(ctx, parent); err != nil { - return nil, err - } - return obj, nil + return a.parentObject, nil } func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) { @@ -190,26 +162,16 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD } func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if a.rng == nil { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } - return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true) - } - - if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { + if err := writer.WriteHeader(ctx, 
a.parentObject.CutPayload()); err != nil { return err } - return writer.WriteChunk(ctx, a.parentObject.Payload()) + return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true) } func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if a.rng == nil { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { + return err } - if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil { return err } @@ -219,16 +181,9 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev return nil } -func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error { - withRng := len(partRanges) > 0 && a.rng != nil - +func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error { for i := range partIDs { - var r *objectSDK.Range - if withRng { - r = &partRanges[i] - } - - _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer) + _, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer) if err != nil { return err } @@ -237,22 +192,13 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec } func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, rngs, err := a.buildChain(ctx, prevID) + chain, err := a.buildChain(ctx, prevID) if err != nil { return err } - reverseRngs := len(rngs) > 0 - - for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { - chain[left], chain[right] = chain[right], chain[left] - - if reverseRngs { - rngs[left], rngs[right] = rngs[right], rngs[left] - } - } - - return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false) + slices.Reverse(chain) + return a.assemblePayloadByObjectIDs(ctx, writer, chain, false) } func (a *assembler) isChild(obj *objectSDK.Object) bool { @@ -260,63 +206,28 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool { return parent == nil || equalAddresses(a.addr, object.AddressOf(parent)) } -func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { +func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) { var ( chain []oid.ID - rngs []objectSDK.Range - from = a.rng.GetOffset() - to = from + a.rng.GetLength() hasPrev = true ) // fill the chain end-to-start for hasPrev { - // check that only for "range" requests, - // for `GET` it stops via the false `withPrev` - if a.rng != nil && a.currentOffset <= from { - break - } - head, err := a.objGetter.HeadObject(ctx, prevID) if err != nil { - return nil, nil, err + return nil, err } if !a.isChild(head) { - return nil, nil, errParentAddressDiffers + return nil, errParentAddressDiffers } - if a.rng != nil { - sz := head.PayloadSize() - - a.currentOffset -= sz - - if a.currentOffset < to { - off := uint64(0) - if from > a.currentOffset { - off = from - a.currentOffset - sz -= from - a.currentOffset - } - - if to < a.currentOffset+off+sz { - sz = to - off - a.currentOffset - } - - index := len(rngs) - rngs = append(rngs, objectSDK.Range{}) - rngs[index].SetOffset(off) - rngs[index].SetLength(sz) - - id, _ := head.ID() - chain = append(chain, id) - } - } else { - id, _ := head.ID() - chain = 
append(chain, id) - } + id, _ := head.ID() + chain = append(chain, id) prevID, hasPrev = head.PreviousID() } - return chain, rngs, nil + return chain, nil } diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go new file mode 100644 index 000000000..ff213cb82 --- /dev/null +++ b/pkg/services/object/get/assembler_head.go @@ -0,0 +1,45 @@ +package getsvc + +import ( + "context" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { + var sourceObjectIDs []oid.ID + sourceObjectID, ok := a.splitInfo.Link() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + sourceObjectID, ok = a.splitInfo.LastPart() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + if len(sourceObjectIDs) == 0 { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + for _, sourceObjectID = range sourceObjectIDs { + obj, err := a.getParent(ctx, sourceObjectID, writer) + if err == nil { + return obj, nil + } + } + return nil, objectSDK.NewSplitInfoError(a.splitInfo) +} + +func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { + obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) + if err != nil { + return nil, err + } + parent := obj.Parent() + if parent == nil { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + if err := writer.WriteHeader(ctx, parent); err != nil { + return nil, err + } + return obj, nil +} diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go new file mode 100644 index 000000000..780693c40 --- /dev/null +++ b/pkg/services/object/get/assembler_range.go @@ -0,0 +1,87 @@ +package getsvc + +import ( + "context" + "slices" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { + if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { + return err + } + return writer.WriteChunk(ctx, a.parentObject.Payload()) +} + +func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { + if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil { + return err + } + if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part + return err + } + return nil +} + +func (a *assembler) assemblePayloadByObjectIDsRange(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error { + for i := range partIDs { + _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer) + if err != nil { + return err + } + } + return nil +} + +func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { + chain, rngs, err := a.buildChainRange(ctx, prevID) + if err != nil { + return err + } + + slices.Reverse(chain) + slices.Reverse(rngs) + return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs) +} + +func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { + var ( + chain []oid.ID + rngs []objectSDK.Range + from = 
a.rng.GetOffset() + to = from + a.rng.GetLength() + + hasPrev = true + ) + + // fill the chain end-to-start + for hasPrev && from < a.currentOffset { + head, err := a.objGetter.HeadObject(ctx, prevID) + if err != nil { + return nil, nil, err + } + if !a.isChild(head) { + return nil, nil, errParentAddressDiffers + } + + nextOffset := a.currentOffset - head.PayloadSize() + clampedFrom := max(from, nextOffset) + clampedTo := min(to, a.currentOffset) + if clampedFrom < clampedTo { + index := len(rngs) + rngs = append(rngs, objectSDK.Range{}) + rngs[index].SetOffset(clampedFrom - nextOffset) + rngs[index].SetLength(clampedTo - clampedFrom) + + id, _ := head.ID() + chain = append(chain, id) + } + + a.currentOffset = nextOffset + prevID, hasPrev = head.PreviousID() + } + + return chain, rngs, nil +} diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index b0895e13e..e0a7e1da6 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -125,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) { objID := a.addr.Object() - trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch) + trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch) if err != nil { return nil, err } @@ -238,15 +238,13 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object var object *objectSDK.Object if a.head { object, err = a.localStorage.Head(ctx, addr, false) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } else { object, err = a.localStorage.Get(ctx, addr) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } return object @@ -286,15 +284,13 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli var object *objectSDK.Object if a.head { object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } else { object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } return object diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index 0ee8aed53..dfb31133c 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -28,16 +28,7 @@ func (r *request) executeOnContainer(ctx context.Context) { localStatus := r.status - for { - if r.processCurrentEpoch(ctx, localStatus) { - break - } - - // check the maximum depth has been reached - if lookupDepth == 0 { - break - } - + for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 { lookupDepth-- // go to the previous epoch 
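The clamping arithmetic in buildChainRange is easier to follow on plain numbers. Walking the split chain from the last part backwards, currentOffset marks where the current part ends inside the parent payload and nextOffset where it starts, so the part's contribution to the requested range [from, to) is the intersection of those two intervals. The sketch below reproduces that walk over a toy list of part sizes; the names (clampBackward, subRange) and the standalone setup are illustrative assumptions, not part of the service code.

package main

import "fmt"

// subRange is the slice of one part's payload that falls inside the request.
type subRange struct {
	part        int    // index of the part in the split chain
	off, length uint64 // offset and length within that part's payload
}

// clampBackward mirrors the buildChainRange walk: currentOffset starts at the
// total payload size and shrinks by each part's size, so every part occupies
// [nextOffset, currentOffset) in the parent payload and its sub-range is the
// intersection of that interval with the requested [from, to).
func clampBackward(sizes []uint64, from, to uint64) []subRange {
	var current uint64
	for _, s := range sizes {
		current += s
	}

	var out []subRange
	for i := len(sizes) - 1; i >= 0 && from < current; i-- {
		next := current - sizes[i]
		lo := max(from, next)
		hi := min(to, current)
		if lo < hi { // this part overlaps the requested range
			out = append(out, subRange{part: i, off: lo - next, length: hi - lo})
		}
		current = next
	}
	return out
}

func main() {
	// Three 100-byte parts; request bytes [150, 250) of the parent payload.
	fmt.Println(clampBackward([]uint64{100, 100, 100}, 150, 250))
	// Prints [{2 0 50} {1 50 50}]: collected in reverse, like the chain,
	// and reversed with slices.Reverse before fetching in the real code.
}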
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 557e9a028..3a50308c2 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { exec.execute(ctx) - return exec.statusError.err + return exec.err } -func (exec *request) execute(ctx context.Context) { - exec.log.Debug(ctx, logs.ServingRequest) +func (r *request) execute(ctx context.Context) { + r.log.Debug(ctx, logs.ServingRequest) // perform local operation - exec.executeLocal(ctx) + r.executeLocal(ctx) - exec.analyzeStatus(ctx, true) + r.analyzeStatus(ctx, true) } -func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { +func (r *request) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result - switch exec.status { + switch r.status { case statusOK: - exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) + r.log.Debug(ctx, logs.OperationFinishedSuccessfully) case statusINHUMED: - exec.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) + r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - exec.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) - exec.assemble(ctx) + r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) + r.assemble(ctx) case statusOutOfRange: - exec.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) + r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) case statusEC: - exec.log.Debug(ctx, logs.GetRequestedObjectIsEC) - if exec.isRaw() && execCnr { - exec.executeOnContainer(ctx) - exec.analyzeStatus(ctx, false) + r.log.Debug(ctx, logs.GetRequestedObjectIsEC) + if r.isRaw() && execCnr { + r.executeOnContainer(ctx) + r.analyzeStatus(ctx, false) } - exec.assembleEC(ctx) + r.assembleEC(ctx) default: - exec.log.Debug(ctx, logs.OperationFinishedWithError, - zap.Error(exec.err), + r.log.Debug(ctx, logs.OperationFinishedWithError, + zap.Error(r.err), ) var errAccessDenied *apistatus.ObjectAccessDenied - if execCnr && errors.As(exec.err, &errAccessDenied) { + if execCnr && errors.As(r.err, &errAccessDenied) { // Local get can't return access denied error, so this error was returned by // write to the output stream. So there is no need to try to find object on other nodes. return } if execCnr { - exec.executeOnContainer(ctx) - exec.analyzeStatus(ctx, false) + r.executeOnContainer(ctx) + r.analyzeStatus(ctx, false) } } } diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 6827018dc..3efc72065 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -63,7 +63,7 @@ type testClient struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch() (uint64, error) { +func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { return uint64(e), nil } @@ -79,7 +79,7 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { +func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { opts := make([]placement.Option, 0, 4) opts = append(opts, placement.ForContainer(g.c), @@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui opts = append(opts, placement.ForObject(*obj)) } - t, err := placement.NewTraverser(opts...) 
+ t, err := placement.NewTraverser(context.Background(), opts...) return t, &containerCore.Container{ Value: g.c, }, err } -func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index 599a6f176..83ef54744 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -28,14 +28,14 @@ type containerStorage struct { cnt *container.Container } -func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) { coreCnt := coreContainer.Container{ Value: *cs.cnt, } return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go index 0df67dec9..2c64244cf 100644 --- a/pkg/services/object/get/remote_getter.go +++ b/pkg/services/object/get/remote_getter.go @@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob if err != nil { return nil, err } - epoch, err := g.es.Epoch() + epoch, err := g.es.Epoch(ctx) if err != nil { return nil, err } diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go index be0950c60..268080486 100644 --- a/pkg/services/object/get/request.go +++ b/pkg/services/object/get/request.go @@ -122,7 +122,7 @@ func (r *request) initEpoch(ctx context.Context) bool { return true } - e, err := r.epochSource.Epoch() + e, err := r.epochSource.Epoch(ctx) switch { default: @@ -141,7 +141,7 @@ func (r *request) initEpoch(ctx context.Context) bool { func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) { obj := addr.Object() - t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch) + t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch) switch { default: diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index 9ec10b5f2..a103f5a7f 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -53,6 +53,6 @@ func New( // WithLogger returns option to specify Get service's logger. 
func WithLogger(l *logger.Logger) Option { return func(s *Service) { - s.log = l.With(zap.String("component", "Object.Get service")) + s.log = l } } diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go index 9669afdba..664366d1b 100644 --- a/pkg/services/object/get/types.go +++ b/pkg/services/object/get/types.go @@ -20,11 +20,11 @@ import ( ) type epochSource interface { - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } type traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } type keyStorage interface { diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go index 7d26a38c3..308ccd512 100644 --- a/pkg/services/object/get/v2/get_range_hash.go +++ b/pkg/services/object/get/v2/get_range_hash.go @@ -22,7 +22,7 @@ import ( // GetRangeHash calls internal service and returns v2 response. func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - forward, err := s.needToForwardGetRangeHashRequest(req) + forward, err := s.needToForwardGetRangeHashRequest(ctx, req) if err != nil { return nil, err } @@ -48,7 +48,7 @@ type getRangeForwardParams struct { address oid.Address } -func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { +func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { if req.GetMetaHeader().GetTTL() <= 1 { return getRangeForwardParams{}, nil } @@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq } result.address = addr - cont, err := s.contSource.Get(addr.Container()) + cont, err := s.contSource.Get(ctx, addr.Container()) if err != nil { return result, fmt.Errorf("(%T) could not get container: %w", s, err) } - epoch, err := s.netmapSource.Epoch() + epoch, err := s.netmapSource.Epoch(ctx) if err != nil { return result, fmt.Errorf("(%T) could not get epoch: %w", s, err) } - nm, err := s.netmapSource.GetNetMapByEpoch(epoch) + nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch) if err != nil { return result, fmt.Errorf("(%T) could not get netmap: %w", s, err) } @@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq builder := placement.NewNetworkMapBuilder(nm) objectID := addr.Object() - nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy()) + nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy()) if err != nil { return result, fmt.Errorf("(%T) could not build object placement: %w", s, err) } diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index fc483b74b..0ec8912fd 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Object.Get V2 service")) + c.log = l } } diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go index 98207336c..0d73bcd4d 100644 --- 
a/pkg/services/object/get/v2/streamer.go +++ b/pkg/services/object/get/v2/streamer.go @@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec p.SetHeader(objV2.GetHeader()) p.SetSignature(objV2.GetSignature()) - return s.GetObjectStream.Send(newResponse(p)) + return s.Send(newResponse(p)) } func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error { p := new(objectV2.GetObjectPartChunk) p.SetChunk(chunk) - return s.GetObjectStream.Send(newResponse(p)) + return s.Send(newResponse(p)) } func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { @@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { } func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error { - return s.GetObjectRangeStream.Send(newRangeResponse(chunk)) + return s.Send(newRangeResponse(chunk)) } func newRangeResponse(p []byte) *objectV2.GetRangeResponse { diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index bfa7fd619..e699a3779 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -3,6 +3,7 @@ package getsvc import ( "context" "crypto/sha256" + "errors" "hash" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -182,9 +183,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran default: return nil, errUnknownChechsumType(t) case refs.SHA256: - p.SetHashGenerator(func() hash.Hash { - return sha256.New() - }) + p.SetHashGenerator(sha256.New) case refs.TillichZemor: p.SetHashGenerator(func() hash.Hash { return tz.New() @@ -360,19 +359,20 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { var err error - - defer func() { - stop = err == nil - - if stop || firstErr == nil { - firstErr = err - } - - // would be nice to log otherwise - }() - res, err = f(ctx, addr, c, key) + // non-status logic error that could be returned + // from the SDK client; should not be considered + // as a connection error + var siErr *objectSDK.SplitInfoError + var eiErr *objectSDK.ECInfoError + + stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr) + + if stop || firstErr == nil { + firstErr = err + } + return }) diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go index 2c405070d..3e8832640 100644 --- a/pkg/services/object/internal/client/client.go +++ b/pkg/services/object/internal/client/client.go @@ -7,9 +7,11 @@ import ( "errors" "fmt" "io" + "strconv" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -31,6 +33,8 @@ type commonPrm struct { local bool xHeaders []string + + netmapEpoch uint64 } // SetClient sets base client for ForstFS API communication. 
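A side note on the groupAddressRequestForwarder rewrite above: the deferred bookkeeping is replaced by an explicit stop condition, where iteration over a node's addresses ends on success or on a typed split/EC info error from the SDK, and in either case that decisive result overwrites firstErr. Below is a minimal standalone sketch of the same condition; the splitInfoError type is a stand-in for the SDK's objectSDK.SplitInfoError, not the real definition.

package main

import (
	"errors"
	"fmt"
)

// splitInfoError stands in for the SDK's typed "logic" error; a real split
// info error means the object was found but is stored as parts, so retrying
// other addresses is pointless.
type splitInfoError struct{}

func (splitInfoError) Error() string { return "object is split" }

func main() {
	// Simulated per-address results: a transport failure, then a wrapped
	// logic error that must stop the iteration and bubble up.
	results := []error{
		errors.New("dial tcp: connection refused"),
		fmt.Errorf("rpc failed: %w", splitInfoError{}),
	}

	var firstErr error
	for _, err := range results {
		var siErr splitInfoError
		stop := err == nil || errors.As(err, &siErr)
		if stop || firstErr == nil {
			firstErr = err // keep the first failure, or the decisive result
		}
		if stop {
			break
		}
	}
	fmt.Println(firstErr) // rpc failed: object is split
}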
@@ -73,6 +77,14 @@ func (x *commonPrm) SetXHeaders(hs []string) { x.xHeaders = hs } +func (x *commonPrm) calculateXHeaders() []string { + hs := x.xHeaders + if x.netmapEpoch != 0 { + hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10)) + } + return hs +} + type readPrmCommon struct { commonPrm } @@ -80,8 +92,8 @@ type readPrmCommon struct { // SetNetmapEpoch sets the epoch number to be used to locate the objectSDK. // // By default current epoch on the server will be used. -func (x *readPrmCommon) SetNetmapEpoch(_ uint64) { - // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465 +func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) { + x.netmapEpoch = epoch } // GetObjectPrm groups parameters of GetObject operation. @@ -139,7 +151,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) { prm.ClientParams.Session = prm.tokenSession } - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local prm.ClientParams.Key = prm.key @@ -233,7 +245,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams) if err == nil { @@ -326,7 +338,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e prm.ClientParams.Session = prm.tokenSession } - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local prm.ClientParams.Length = prm.ln @@ -390,7 +402,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) { defer span.End() prmCli := client.PrmObjectPutInit{ - XHeaders: prm.xHeaders, + XHeaders: prm.calculateXHeaders(), BearerToken: prm.tokenBearer, Session: prm.tokenSession, Local: true, @@ -437,7 +449,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro } prmCli := client.PrmObjectPutSingle{ - XHeaders: prm.xHeaders, + XHeaders: prm.calculateXHeaders(), BearerToken: prm.tokenBearer, Session: prm.tokenSession, Local: true, @@ -496,7 +508,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes prm.cliPrm.Local = prm.local prm.cliPrm.Session = prm.tokenSession prm.cliPrm.BearerToken = prm.tokenBearer - prm.cliPrm.XHeaders = prm.xHeaders + prm.cliPrm.XHeaders = prm.calculateXHeaders() prm.cliPrm.Key = prm.key rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm) diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go index 19748e938..6a6ee0f0f 100644 --- a/pkg/services/object/metrics.go +++ b/pkg/services/object/metrics.go @@ -4,6 +4,7 @@ import ( "context" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) @@ -34,7 +35,7 @@ type ( } MetricRegister interface { - AddRequestDuration(string, time.Duration, bool) + AddRequestDuration(string, time.Duration, bool, string) AddPayloadSize(string, int) } ) @@ -51,7 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er if m.enabled { t := time.Now() defer func() { - 
m.metrics.AddRequestDuration("Get", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) }() err = m.next.Get(req, &getStreamMetric{ ServerStream: stream, @@ -106,7 +107,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl res, err := m.next.PutSingle(ctx, request) - m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil) + m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) if err == nil { m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload())) } @@ -122,7 +123,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) res, err := m.next.Head(ctx, request) - m.metrics.AddRequestDuration("Head", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Head", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -135,7 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream) err := m.next.Search(req, stream) - m.metrics.AddRequestDuration("Search", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) return err } @@ -148,7 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque res, err := m.next.Delete(ctx, request) - m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } return m.next.Delete(ctx, request) @@ -160,7 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR err := m.next.GetRange(req, stream) - m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil) + m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) return err } @@ -173,7 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa res, err := m.next.GetRangeHash(ctx, request) - m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil) + m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -209,7 +210,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { res, err := s.stream.CloseAndRecv(ctx) - s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil) + s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -223,7 +224,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { res, err := s.stream.CloseAndRecv(ctx) - s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil) + s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) return res, err } diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go index 953f82b48..5d298bfed 100644 --- a/pkg/services/object/patch/service.go +++ b/pkg/services/object/patch/service.go @@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config, // Patch calls internal service and returns v2 object streamer. 
func (s *Service) Patch() (object.PatchObjectStream, error) { - nodeKey, err := s.Config.KeyStorage.GetKey(nil) + nodeKey, err := s.KeyStorage.GetKey(nil) if err != nil { return nil, err } diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 91b4efdc1..ff13b1d3e 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { } oV2.GetHeader().SetOwnerID(ownerID) - target, err := target.New(objectwriter.Params{ + target, err := target.New(ctx, objectwriter.Params{ Config: s.Config, Common: commonPrm, Header: objectSDK.NewFromV2(oV2), @@ -195,7 +195,12 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { patch.FromV2(req.GetBody()) if !s.nonFirstSend { - err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes) + err := s.patcher.ApplyHeaderPatch(ctx, + patcher.ApplyHeaderPatchPrm{ + NewSplitHeader: patch.NewSplitHeader, + NewAttributes: patch.NewAttributes, + ReplaceAttributes: patch.ReplaceAttributes, + }) if err != nil { return fmt.Errorf("patch attributes: %w", err) } @@ -214,6 +219,9 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { } func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { + if s.patcher == nil { + return nil, errors.New("uninitialized patch streamer") + } patcherResp, err := s.patcher.Close(ctx) if err != nil { return nil, err diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 5cc0a5722..7aeb5857d 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) @@ -27,8 +26,6 @@ func NewService(ks *objutil.KeyStorage, opts ...objectwriter.Option, ) *Service { c := &objectwriter.Config{ - RemotePool: util.NewPseudoWorkerPool(), - LocalPool: util.NewPseudoWorkerPool(), Logger: logger.NewLoggerWrapper(zap.L()), KeyStorage: ks, ClientConstructor: cc, @@ -59,8 +56,8 @@ func NewService(ks *objutil.KeyStorage, } } -func (p *Service) Put() (*Streamer, error) { +func (s *Service) Put() (*Streamer, error) { return &Streamer{ - Config: p.Config, + Config: s.Config, }, nil } diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 36b0bd54c..90f473254 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -21,7 +21,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" @@ -29,6 +28,7 @@ import ( sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/tzhash/tz" @@ -86,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest } func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) { - if err := s.validarePutSingleSize(obj); err != nil { + if err := s.validarePutSingleSize(ctx, obj); err != nil { return object.ContentMeta{}, err } @@ -97,12 +97,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) return s.validatePutSingleObject(ctx, obj) } -func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error { +func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error { if uint64(len(obj.Payload())) != obj.PayloadSize() { return target.ErrWrongPayloadSize } - maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize() + maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx) if obj.PayloadSize() > maxAllowedSize { return target.ErrExceedingMaxSize } @@ -153,7 +153,7 @@ func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { localOnly := req.GetMetaHeader().GetTTL() <= 1 - placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly) + placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly) if err != nil { return err } @@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o } func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { - iter := s.Config.NewNodeIterator(placement.placementOptions) + iter := s.NewNodeIterator(placement.placementOptions) iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly) iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast signer := &putSingleRequestSigner{ req: req, - keyStorage: s.Config.KeyStorage, + keyStorage: s.KeyStorage, signer: &sync.Once{}, } @@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace if err != nil { return err } - key, err := s.Config.KeyStorage.GetKey(nil) + key, err := s.KeyStorage.GetKey(nil) if err != nil { return err } signer := &putSingleRequestSigner{ req: req, - keyStorage: s.Config.KeyStorage, + keyStorage: s.KeyStorage, signer: &sync.Once{}, } @@ -218,14 +218,14 @@ type putSinglePlacement struct { resetSuccessAfterOnBroadcast bool } -func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { +func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { var result putSinglePlacement cnrID, ok := obj.ContainerID() if !ok { return result, errors.New("missing container ID") } - cnrInfo, err := s.Config.ContainerSource.Get(cnrID) + cnrInfo, err := s.ContainerSource.Get(ctx, cnrID) if err != nil { return result, fmt.Errorf("could not get container by ID: %w", 
err) } @@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb } result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource) + latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource) if err != nil { return result, fmt.Errorf("could not get latest network map: %w", err) } builder := placement.NewNetworkMapBuilder(latestNetmap) if localOnly { result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1)) - builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys) + builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys) } result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder)) return result, nil @@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite client.NodeInfoFromNetmapElement(&info, nodeDesc.Info) - c, err := s.Config.ClientConstructor.Get(info) + c, err := s.ClientConstructor.Get(info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } @@ -283,7 +283,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { localTarget := &objectwriter.LocalTarget{ - Storage: s.Config.LocalStore, + Storage: s.LocalStore, Container: container, } return localTarget.WriteObject(ctx, obj, meta) @@ -317,12 +317,11 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, if err != nil { objID, _ := obj.ID() cnrID, _ := obj.ContainerID() - s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure, + s.Logger.Warn(ctx, logs.PutSingleRedirectFailure, zap.Error(err), zap.Stringer("address", addr), zap.Stringer("object_id", objID), zap.Stringer("container_id", cnrID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } @@ -351,8 +350,12 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, err = signature.VerifyServiceMessage(resp) if err != nil { err = fmt.Errorf("response verification failed: %w", err) + return } + st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus()) + err = apistatus.ErrFromStatus(st) + return }) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index f71309d31..19768b7fa 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -36,7 +36,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { } var err error - p.target, err = target.New(prmTarget) + p.target, err = target.New(ctx, prmTarget) if err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) } diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go index 36b514fbc..f0c648187 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -56,10 +56,10 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) s.saveChunks = v.GetSignature() != nil if s.saveChunks { - maxSz := s.stream.MaxSizeSrc.MaxObjectSize() + maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx) s.sizes = &sizes{ - payloadSz: uint64(v.GetHeader().GetPayloadLength()), + payloadSz: v.GetHeader().GetPayloadLength(), } // check payload size limit overflow diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go new file mode 100644 
index 000000000..01eb1ea8d --- /dev/null +++ b/pkg/services/object/qos.go @@ -0,0 +1,145 @@ +package object + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" +) + +var _ ServiceServer = (*qosObjectService)(nil) + +type AdjustIOTag interface { + AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context +} + +type qosObjectService struct { + next ServiceServer + adj AdjustIOTag +} + +func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer { + return &qosObjectService{ + next: next, + adj: adjIOTag, + } +} + +func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Delete(ctx, req) +} + +func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Get(req, &qosReadStream[*object.GetResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.GetRangeHash(ctx, req) +} + +func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Head(ctx, req) +} + +func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) { + s, err := q.next.Patch(ctx) + if err != nil { + return nil, err + } + return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{ + s: s, + adj: q.adj, + }, nil +} + +func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) { + s, err := q.next.Put(ctx) + if err != nil { + return nil, err + } + return &qosWriteStream[*object.PutRequest, *object.PutResponse]{ + s: s, + adj: q.adj, + }, nil +} + +func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.PutSingle(ctx, req) +} + +func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Search(req, &qosReadStream[*object.SearchResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +type qosSend[T any] interface { + Send(T) error +} + +type qosReadStream[T any] struct { + sender qosSend[T] + ctxF func() context.Context +} + +func (g *qosReadStream[T]) Context() context.Context { + return g.ctxF() +} + +func (g *qosReadStream[T]) 
Send(resp T) error { + return g.sender.Send(resp) +} + +type qosVerificationHeader interface { + GetVerificationHeader() *session.RequestVerificationHeader +} + +type qosSendRecv[TReq qosVerificationHeader, TResp any] interface { + Send(context.Context, TReq) error + CloseAndRecv(context.Context) (TResp, error) +} + +type qosWriteStream[TReq qosVerificationHeader, TResp any] struct { + s qosSendRecv[TReq, TResp] + adj AdjustIOTag + + ioTag string + ioTagDefined bool +} + +func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) { + if q.ioTagDefined { + ctx = tagging.ContextWithIOTag(ctx, q.ioTag) + } + return q.s.CloseAndRecv(ctx) +} + +func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error { + if !q.ioTagDefined { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx) + } + assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment") + ctx = tagging.ContextWithIOTag(ctx, q.ioTag) + return q.s.Send(ctx, req) +} diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go deleted file mode 100644 index eb4041f80..000000000 --- a/pkg/services/object/request_context.go +++ /dev/null @@ -1,24 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type RequestContextKeyT struct{} - -var RequestContextKey = RequestContextKeyT{} - -// RequestContext is a context passed between middleware handlers. -type RequestContext struct { - Namespace string - - SenderKey []byte - - ContainerOwner user.ID - - Role acl.Role - - BearerToken *bearer.Token -} diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index 999a3cc9e..60d469b11 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -20,7 +20,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error { ) // initialize epoch number - if err := exec.initEpoch(); err != nil { + if err := exec.initEpoch(ctx); err != nil { return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err) } @@ -48,7 +48,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { zap.Uint64("number", exec.curProcEpoch), ) - traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch) + traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch) if err != nil { return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err) } @@ -73,7 +73,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { select { case <-ctx.Done(): exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext, - zap.String("error", ctx.Err().Error())) + zap.Error(ctx.Err())) return default: } @@ -86,14 +86,14 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { c, err := exec.svc.clientConstructor.get(info) if err != nil { - exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err)) return } ids, err := c.searchObjects(ctx, exec, info) if err != nil { exec.log.Debug(ctx, logs.SearchRemoteOperationFailed, - zap.String("error", err.Error())) + 
zap.Error(err)) return } @@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { err = exec.writeIDList(ids) mtx.Unlock() if err != nil { - exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err)) return } }(i) @@ -114,9 +114,9 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { return nil } -func (exec *execCtx) getContainer() (containerSDK.Container, error) { +func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) { cnrID := exec.containerID() - cnr, err := exec.svc.containerSource.Get(cnrID) + cnr, err := exec.svc.containerSource.Get(ctx, cnrID) if err != nil { return containerSDK.Container{}, err } diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index eb9635f14..ced51ecce 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -1,6 +1,8 @@ package searchsvc import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -48,13 +50,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 { return exec.prm.common.NetmapLookupDepth() } -func (exec *execCtx) initEpoch() error { +func (exec *execCtx) initEpoch(ctx context.Context) error { exec.curProcEpoch = exec.netmapEpoch() if exec.curProcEpoch > 0 { return nil } - e, err := exec.svc.currentEpochReceiver.Epoch() + e, err := exec.svc.currentEpochReceiver.Epoch(ctx) if err != nil { return err } diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go index bc59d0394..ec65ab06a 100644 --- a/pkg/services/object/search/local.go +++ b/pkg/services/object/search/local.go @@ -11,7 +11,7 @@ import ( func (exec *execCtx) executeLocal(ctx context.Context) error { ids, err := exec.svc.localStorage.search(ctx, exec) if err != nil { - exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err)) return err } diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index e24da975d..76c091f85 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -38,7 +38,7 @@ func (exec *execCtx) execute(ctx context.Context) error { func (exec *execCtx) logResult(ctx context.Context, err error) { switch { default: - exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) case err == nil: exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) } diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 0a40025e1..918ad421f 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "errors" "fmt" + "slices" "strconv" "testing" @@ -58,7 +59,7 @@ type simpleIDWriter struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch() (uint64, error) { +func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { return uint64(e), nil } @@ -81,8 +82,8 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, 
*containerCore.Container, error) { - t, err := placement.NewTraverser( +func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { + t, err := placement.NewTraverser(context.Background(), placement.ForContainer(g.c), placement.UseBuilder(g.b[epoch]), placement.WithoutSuccessTracking(), @@ -90,7 +91,7 @@ func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch ui return t, &containerCore.Container{Value: g.c}, err } -func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) @@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap. return nil, errors.New("vectors for address not found") } - res := make([][]netmap.NodeInfo, len(vs)) - copy(res, vs) + res := slices.Clone(vs) return res, nil } diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index 77d25357a..56fe56468 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -46,11 +46,11 @@ type cfg struct { } traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } currentEpochReceiver interface { - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } keyStore *util.KeyStorage @@ -94,6 +94,6 @@ func New(e *engine.StorageEngine, // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Object.Search service")) + c.log = l } } diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index 910384a0b..0be5345b9 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -2,6 +2,7 @@ package searchsvc import ( "context" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -53,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error { } // exclude processed address - list = append(list[:i], list[i+1:]...) 
+ list = slices.Delete(list, i, i+1) i-- } @@ -113,7 +114,7 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c } func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { - cnr, err := exec.getContainer() + cnr, err := exec.getContainer(ctx) if err != nil { return nil, err } diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index 2b44227a5..fd8e926dd 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -96,7 +96,8 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PutResponse) } } @@ -132,7 +133,8 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PatchResponse) } } diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go index 0b3676edb..b446d3605 100644 --- a/pkg/services/object/transport_splitter.go +++ b/pkg/services/object/transport_splitter.go @@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error { var newResp *object.SearchResponse - for ln := uint64(len(ids)); ; { + for { if newResp == nil { newResp = new(object.SearchResponse) newResp.SetBody(body) } - cut := min(s.addrAmount, ln) + cut := min(s.addrAmount, uint64(len(ids))) body.SetIDList(ids[:cut]) newResp.SetMetaHeader(resp.GetMetaHeader()) diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index a9f875d8d..b10826226 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -14,14 +14,6 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net l.Error(ctx, logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), - zap.String("error", err.Error()), - ) -} - -// LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
-func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) { - l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool, - zap.String("request", req), - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go index 1bd39f9ea..f74b0aab9 100644 --- a/pkg/services/object/util/placement.go +++ b/pkg/services/object/util/placement.go @@ -1,7 +1,9 @@ package util import ( + "context" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -43,8 +45,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu } } -func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(cnr, obj, policy) +func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -76,8 +78,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac } } -func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(cnr, obj, policy) +func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -92,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapS } if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - vs[i] = append(vs[i][:j], vs[i][j+1:]...) + vs[i] = slices.Delete(vs[i], j, j+1) j-- } } @@ -122,15 +124,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav // GenerateTraverser generates placement Traverser for provided object address // using epoch-th network map. -func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { +func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { // get network map by epoch - nm, err := g.netMapSrc.GetNetMapByEpoch(epoch) + nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch) if err != nil { return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err) } // get container related container - cnr, err := g.cnrSrc.Get(idCnr) + cnr, err := g.cnrSrc.Get(ctx, idCnr) if err != nil { return nil, nil, fmt.Errorf("could not get container: %w", err) } @@ -160,7 +162,7 @@ func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoc ) } - t, err := placement.NewTraverser(traverseOpts...) + t, err := placement.NewTraverser(ctx, traverseOpts...) 
if err != nil { return nil, nil, err } diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go index a890d5357..7242970b5 100644 --- a/pkg/services/object_manager/placement/cache_test.go +++ b/pkg/services/object_manager/placement/cache_test.go @@ -85,7 +85,10 @@ func TestContainerNodesCache(t *testing.T) { }) t.Run("the error is propagated", func(t *testing.T) { var pp netmapSDK.PlacementPolicy - require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X")) + r := netmapSDK.ReplicaDescriptor{} + r.SetNumberOfObjects(1) + r.SetSelectorName("Missing") + pp.AddReplicas(r) c := placement.NewContainerNodesCache(size) _, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp) diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go index 45e6df339..0f24a9d96 100644 --- a/pkg/services/object_manager/placement/metrics.go +++ b/pkg/services/object_manager/placement/metrics.go @@ -2,24 +2,90 @@ package placement import ( "errors" + "fmt" + "maps" + "math" "strings" + "sync" + "sync/atomic" + locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" + locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) const ( attrPrefix = "$attribute:" + + geoDistance = "$geoDistance" ) type Metric interface { CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int } -func ParseMetric(raw string) (Metric, error) { - if attr, found := strings.CutPrefix(raw, attrPrefix); found { - return NewAttributeMetric(attr), nil +type metricsParser struct { + locodeDBPath string + locodes map[string]locodedb.Point +} + +type MetricParser interface { + ParseMetrics([]string) ([]Metric, error) +} + +func NewMetricsParser(locodeDBPath string) (MetricParser, error) { + return &metricsParser{ + locodeDBPath: locodeDBPath, + }, nil +} + +func (p *metricsParser) initLocodes() error { + if len(p.locodes) != 0 { + return nil } - return nil, errors.New("unsupported priority metric") + if len(p.locodeDBPath) > 0 { + p.locodes = make(map[string]locodedb.Point) + locodeDB := locodebolt.New(locodebolt.Prm{ + Path: p.locodeDBPath, + }, + locodebolt.ReadOnly(), + ) + err := locodeDB.Open() + if err != nil { + return err + } + defer locodeDB.Close() + err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) { + p.locodes[k] = v + }) + if err != nil { + return err + } + return nil + } + return errors.New("set path to locode database") +} + +func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) { + var metrics []Metric + for _, raw := range priority { + if attr, found := strings.CutPrefix(raw, attrPrefix); found { + metrics = append(metrics, NewAttributeMetric(attr)) + } else if raw == geoDistance { + err := p.initLocodes() + if err != nil { + return nil, err + } + if len(p.locodes) == 0 { + return nil, fmt.Errorf("provide locodes database for metric %s", raw) + } + m := NewGeoDistanceMetric(p.locodes) + metrics = append(metrics, m) + } else { + return nil, fmt.Errorf("unsupported priority metric %s", raw) + } + } + return metrics, nil } // attributeMetric describes priority metric based on attribute. @@ -41,3 +107,79 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node func NewAttributeMetric(attr string) Metric { return &attributeMetric{attribute: attr} } + +// geoDistanceMetric describes priority metric based on attribute. 
+
+// geoDistanceMetric describes a priority metric based on geographic distance.
+type geoDistanceMetric struct {
+	locodes  map[string]locodedb.Point
+	distance *atomic.Pointer[map[string]int]
+	mtx      sync.Mutex
+}
+
+func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric {
+	d := atomic.Pointer[map[string]int]{}
+	m := make(map[string]int)
+	d.Store(&m)
+	gm := &geoDistanceMetric{
+		locodes:  locodes,
+		distance: &d,
+	}
+	return gm
+}
+
+// CalculateValue returns the distance in kilometers between the current node
+// and the provided one if coordinates are found for both of them.
+// Otherwise it returns math.MaxInt.
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int {
+	fl := from.LOCODE()
+	tl := to.LOCODE()
+	if fl == tl {
+		return 0
+	}
+	m := gm.distance.Load()
+	if v, ok := (*m)[fl+tl]; ok {
+		return v
+	}
+	return gm.calculateDistance(fl, tl)
+}
+
+func (gm *geoDistanceMetric) calculateDistance(from, to string) int {
+	gm.mtx.Lock()
+	defer gm.mtx.Unlock()
+	od := gm.distance.Load()
+	if v, ok := (*od)[from+to]; ok {
+		return v
+	}
+	nd := maps.Clone(*od)
+	var dist int
+	pointFrom, okFrom := gm.locodes[from]
+	pointTo, okTo := gm.locodes[to]
+	if okFrom && okTo {
+		dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude()))
+	} else {
+		dist = math.MaxInt
+	}
+	nd[from+to] = dist
+	gm.distance.Store(&nd)
+
+	return dist
+}
+
+// distance returns the distance in kilometers between two points given as
+// latitude and longitude of point 1 and 2 in decimal degrees.
+// Original implementation can be found here https://www.geodatasource.com/developers/go.
+func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 {
+	radLat1 := math.Pi * lt1 / 180
+	radLat2 := math.Pi * lt2 / 180
+	radTheta := math.Pi * (ln1 - ln2) / 180
+
+	dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta)
+
+	if dist > 1 {
+		dist = 1
+	}
+
+	dist = math.Acos(dist)
+	dist = dist * 180 / math.Pi
+	dist = dist * 60 * 1.1515 * 1.609344
+
+	return dist
+}
diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go
index 1782e27ea..b3f8d9c03 100644
--- a/pkg/services/object_manager/placement/netmap.go
+++ b/pkg/services/object_manager/placement/netmap.go
@@ -1,6 +1,7 @@ package placement
import (
+	"context"
	"crypto/sha256"
	"fmt"
@@ -35,12 +36,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
	}
}
-func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) {
+func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) {
	return s.nm, nil
}
-func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
-	nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
+func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
+	nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc)
	if err != nil {
		return nil, fmt.Errorf("could not get network map: %w", err)
	}
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 7c720b204..a3f9af959 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -1,6 +1,7 @@ package placement
import (
+	"context"
	"errors"
	"fmt"
	"slices"
@@ -21,7 +22,7 @@ type Builder interface {
	//
	// Must return all container nodes if object identifier
	// is nil.
- BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) + BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) } type NodeState interface { @@ -78,7 +79,7 @@ func defaultCfg() *cfg { } // NewTraverser creates, initializes with options and returns Traverser instance. -func NewTraverser(opts ...Option) (*Traverser, error) { +func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { cfg := defaultCfg() for i := range opts { @@ -98,7 +99,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) { return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy) } - ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy) + ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy) if err != nil { return nil, fmt.Errorf("could not build placement: %w", err) } @@ -120,10 +121,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) { } rem = []int{-1, -1} - sortedVector, err := sortVector(cfg, unsortedVector) - if err != nil { - return nil, err - } + sortedVector := sortVector(cfg, unsortedVector) ns = [][]netmap.NodeInfo{sortedVector, regularVector} } else if cfg.flatSuccess != nil { ns = flatNodes(ns) @@ -188,7 +186,7 @@ type nodeMetrics struct { metrics []int } -func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, error) { +func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo { nm := make([]nodeMetrics, len(unsortedVector)) node := cfg.nodeState.LocalNodeInfo() @@ -202,14 +200,14 @@ func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, metrics: m, } } - slices.SortFunc(nm, func(a, b nodeMetrics) int { + slices.SortStableFunc(nm, func(a, b nodeMetrics) int { return slices.Compare(a.metrics, b.metrics) }) sortedVector := make([]netmap.NodeInfo, len(unsortedVector)) for i := range unsortedVector { sortedVector[i] = unsortedVector[nm[i].index] } - return sortedVector, nil + return sortedVector } // Node is a descriptor of storage node with information required for intra-container communication. @@ -290,8 +288,8 @@ func (t *Traverser) Next() []Node { func (t *Traverser) skipEmptyVectors() { for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 { - t.vectors = append(t.vectors[:i], t.vectors[i+1:]...) - t.rem = append(t.rem[:i], t.rem[i+1:]...) 
+ t.vectors = slices.Delete(t.vectors, i, i+1) + t.rem = slices.Delete(t.rem, i, i+1) i-- } else { break diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index f96e5c8a7..d1370f21e 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -1,6 +1,8 @@ package placement import ( + "context" + "slices" "strconv" "testing" @@ -17,7 +19,7 @@ type testBuilder struct { vectors [][]netmap.NodeInfo } -func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return b.vectors, nil } @@ -33,8 +35,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { vc := make([][]netmap.NodeInfo, 0, len(v)) for i := range v { - ns := make([]netmap.NodeInfo, len(v[i])) - copy(ns, v[i]) + ns := slices.Clone(v[i]) vc = append(vc, ns) } @@ -102,7 +103,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithoutSuccessTracking(), @@ -131,7 +132,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -160,7 +161,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), ) @@ -201,7 +202,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodes, cnr := testPlacement(selectors, replicas) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local) @@ -276,7 +277,7 @@ func TestTraverserRemValues(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithCopyNumbers(testCase.copyNumbers), @@ -322,7 +323,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -374,7 +375,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -445,7 +446,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { NewAttributeMetric("UN-LOCODE"), } - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -483,7 +484,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { nodesCopy = copyVectors(nodes) - tr, err = NewTraverser( + tr, err = NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -516,7 +517,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { nodesCopy = copyVectors(nodes) - tr, err = 
NewTraverser( + tr, err = NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -567,7 +568,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -600,4 +601,53 @@ func TestTraverserPriorityMetrics(t *testing.T) { next = tr.Next() require.Nil(t, next) }) + + t.Run("one rep one geo metric", func(t *testing.T) { + t.Skip() + selectors := []int{2} + replicas := []int{2} + + nodes, cnr := testPlacement(selectors, replicas) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("UN-LOCODE", "RU LED") + + sdkNode := testNode(2) + sdkNode.SetAttribute("UN-LOCODE", "FI HEL") + + nodesCopy := copyVectors(nodes) + + parser, err := NewMetricsParser("/path/to/locode_db") + require.NoError(t, err) + m, err := parser.ParseMetrics([]string{geoDistance}) + require.NoError(t, err) + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `$geoDistance` the order will be: + // [ {Node_0 RU MOW}, {Node_1 RU LED}] + // With priority metric `$geoDistance` the order should be: + // [ {Node_1 RU LED}, {Node_0 RU MOW}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) } diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index a4e36c2dc..e5f001d5a 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -61,10 +61,8 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) - } else { - if ts != nil { - return g.handleTS(ctx, addrStr, ts, epoch) - } + } else if ts != nil { + return g.handleTS(ctx, addrStr, ts, epoch) } // requested tombstone not diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go index 67ddf316f..2147a32fe 100644 --- a/pkg/services/object_manager/tombstone/constructor.go +++ b/pkg/services/object_manager/tombstone/constructor.go @@ -3,6 +3,7 @@ package tombstone import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker { panicOnNil(cfg.tsSource, "Tombstone source") cache, err := lru.New[string, uint64](cfg.cacheSize) - if err != nil { - panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err)) - } + assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize)) return &ExpirationChecker{ cache: cache, diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go index 1ff07b05a..975941847 100644 --- 
a/pkg/services/object_manager/tombstone/source/source.go +++ b/pkg/services/object_manager/tombstone/source/source.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -38,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) { // Panics if any of the provided options does not allow // constructing a valid tombstone local Source. func NewSource(p TombstoneSourcePrm) Source { - if p.s == nil { - panic("Tombstone source: nil object service") - } + assert.False(p.s == nil, "Tombstone source: nil object service") return Source(p) } diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 2e5e54dfd..dcaaec0b4 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -28,10 +28,10 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er )) defer span.End() - cnr, err := p.cnrSrc.Get(objInfo.Address.Container()) + cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container()) if err != nil { if client.IsErrContainerNotFound(err) { - existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container()) + existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container()) if errWasRemoved != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved) } else if existed { @@ -56,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { idObj := objInfo.Address.Object() idCnr := objInfo.Address.Container() - nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy) + nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -110,6 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // Number of copies that are stored on maintenance nodes. var uncheckedCopies int + var candidates []netmap.NodeInfo for i := 0; shortage > 0 && i < len(nodes); i++ { select { case <-ctx.Done(): @@ -117,71 +118,68 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe default: } - if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) { - requirements.needLocalCopy = true - - shortage-- - } else if nodes[i].Status().IsMaintenance() { - shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) - } else { - if status := checkedNodes.processStatus(nodes[i]); status.Processed() { - if status == nodeHoldsObject { - // node already contains replica, no need to replicate - nodes = append(nodes[:i], nodes[i+1:]...) - i-- - shortage-- - } - + var err error + st := checkedNodes.processStatus(nodes[i]) + if !st.Processed() { + st, err = p.checkStatus(ctx, addr, nodes[i]) + checkedNodes.set(nodes[i], st) + if st == nodeDoesNotHoldObject { + // 1. This is the first time the node is encountered (`!st.Processed()`). + // 2. The node does not hold object (`st == nodeDoesNotHoldObject`). + // So we need to try to put an object to it. 
+ candidates = append(candidates, nodes[i]) continue } - - callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) - - _, err := p.remoteHeader(callCtx, nodes[i], addr, false) - - cancel() - - if err == nil { - shortage-- - checkedNodes.submitReplicaHolder(nodes[i]) - } else { - if client.IsErrObjectNotFound(err) { - checkedNodes.submitReplicaCandidate(nodes[i]) - continue - } else if client.IsErrNodeUnderMaintenance(err) { - shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) - } else { - p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, - zap.Stringer("object", addr), - zap.String("error", err.Error()), - ) - } - } } - nodes = append(nodes[:i], nodes[i+1:]...) - i-- + switch st { + case nodeIsLocal: + requirements.needLocalCopy = true + + shortage-- + case nodeIsUnderMaintenance: + shortage-- + uncheckedCopies++ + + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, + zap.String("node", netmap.StringifyPublicKey(nodes[i]))) + case nodeHoldsObject: + shortage-- + case nodeDoesNotHoldObject: + case nodeStatusUnknown: + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, + zap.Stringer("object", addr), + zap.Error(err)) + default: + panic("unreachable") + } } - p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies) + p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies) } -// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values -// -// consider remote nodes under maintenance as problem OK. Such -// nodes MAY not respond with object, however, this is how we -// prevent spam with new replicas. -// However, additional copies should not be removed in this case, -// because we can remove the only copy this way. 
-func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { - checkedNodes.submitReplicaHolder(node) - shortage-- - uncheckedCopies++ +func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) { + if p.netmapKeys.IsLocalKey(node.PublicKey()) { + return nodeIsLocal, nil + } + if node.Status().IsMaintenance() { + return nodeIsUnderMaintenance, nil + } - p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, - zap.String("node", netmap.StringifyPublicKey(node)), - ) - return shortage, uncheckedCopies + callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) + _, err := p.remoteHeader(callCtx, node, addr, false) + cancel() + + if err == nil { + return nodeHoldsObject, nil + } + if client.IsErrObjectNotFound(err) { + return nodeDoesNotHoldObject, nil + } + if client.IsErrNodeUnderMaintenance(err) { + return nodeIsUnderMaintenance, nil + } + return nodeStatusUnknown, err } func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements, diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go index d4c7ccbf9..69879c439 100644 --- a/pkg/services/policer/check_test.go +++ b/pkg/services/policer/check_test.go @@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) { cache.SubmitSuccessfulReplication(node) require.Equal(t, cache.processStatus(node), nodeHoldsObject) - cache.submitReplicaCandidate(node) + cache.set(node, nodeDoesNotHoldObject) require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject) - cache.submitReplicaHolder(node) + cache.set(node, nodeHoldsObject) require.Equal(t, cache.processStatus(node), nodeHoldsObject) } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index cb583f1d3..fbdeb3148 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -39,7 +39,7 @@ func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectco // All of them must be stored on all of the container nodes. 
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { objID := objInfo.Address.Object() - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy) + nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -69,7 +69,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec } func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) + nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult { var removeLocalChunk bool requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))] - if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { + if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { // current node is required node, we are happy return ecChunkProcessResult{ validPlacement: true, @@ -131,7 +131,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n // consider maintenance mode has object, but do not drop local copy p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) } else { - p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error())) + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err)) } return ecChunkProcessResult{ @@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec if uint32(i) == objInfo.ECInfo.Total { break } - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{} } } @@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool { var eiErr *objectSDK.ECInfoError for _, n := range nodes { - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { continue } _, err := p.remoteHeader(ctx, n, parentAddress, true) @@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info return } var err error - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { _, err = p.localHeader(ctx, parentAddress) } else { _, err = p.remoteHeader(ctx, n, parentAddress, true) @@ -281,7 +281,9 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info } chunkIDs[ch.Index] = ecInfoChunkID } - } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < 
objInfo.ECInfo.Total { + } else if client.IsErrObjectAlreadyRemoved(err) { + restore = false + } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ NumCopies: 1, @@ -341,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, pID, _ := part.ID() addr.SetObject(pID) targetNode := nodes[idx%len(nodes)] - if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) { + if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) { p.replicator.HandleLocalPutTask(ctx, replicator.Task{ Addr: addr, Obj: part, @@ -369,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I var obj *objectSDK.Object var err error for _, node := range nodes { - if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { + if p.netmapKeys.IsLocalKey(node.PublicKey()) { obj, err = p.localObject(egCtx, objID) } else { obj, err = p.remoteObject(egCtx, node, objID) diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go index e230153f9..c6980536b 100644 --- a/pkg/services/policer/ec_test.go +++ b/pkg/services/policer/ec_test.go @@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } @@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index cd47cb0fc..c2157de5d 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -8,6 +8,9 @@ const ( nodeNotProcessed nodeProcessStatus = iota nodeDoesNotHoldObject nodeHoldsObject + nodeStatusUnknown + nodeIsUnderMaintenance + nodeIsLocal ) func (st nodeProcessStatus) Processed() bool { @@ -15,37 +18,19 @@ func (st nodeProcessStatus) Processed() bool { } // nodeCache tracks Policer's check progress. 
-type nodeCache map[uint64]bool
+type nodeCache map[uint64]nodeProcessStatus
func newNodeCache() nodeCache {
-	return make(map[uint64]bool)
+	return make(map[uint64]nodeProcessStatus)
}
-func (n nodeCache) set(node netmap.NodeInfo, val bool) {
+func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
	n[node.Hash()] = val
}
-// submits storage node as a candidate to store the object replica in case of
-// shortage.
-func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
-	n.set(node, false)
-}
-
-// submits storage node as a current object replica holder.
-func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
-	n.set(node, true)
-}
-
// processStatus returns current processing status of the storage node.
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
-	switch val, ok := n[node.Hash()]; {
-	case !ok:
-		return nodeNotProcessed
-	case val:
-		return nodeHoldsObject
-	default:
-		return nodeDoesNotHoldObject
-	}
+	return n[node.Hash()]
}
// SubmitSuccessfulReplication marks given storage node as a current object
@@ -53,5 +38,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
//
// SubmitSuccessfulReplication implements replicator.TaskResult.
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
-	n.submitReplicaHolder(node)
+	n.set(node, nodeHoldsObject)
}
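An observation on why the simplified processStatus is safe: nodeNotProcessed is declared first in the iota block, so it is the zero value of nodeProcessStatus, and a plain map lookup already reports "not processed" for unseen nodes without the comma-ok check. A standalone illustration of the idiom (toy names, not from this patch):

	type status int
	const notProcessed status = iota // zero value of status
	cache := map[uint64]status{}
	_ = cache[42] == notProcessed // true: a missing key falls back to the zero value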
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index 4e8bacfec..c91e7cc7c 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,12 +1,13 @@ package policer
import (
+	"fmt"
	"sync"
	"time"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	lru "github.com/hashicorp/golang-lru/v2"
-	"go.uber.org/zap"
)
type objectsInWork struct {
@@ -54,12 +55,8 @@ func New(opts ...Option) *Policer {
		opts[i](c)
	}
-	c.log = c.log.With(zap.String("component", "Object Policer"))
-
	cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
-	if err != nil {
-		panic(err)
-	}
+	assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize))
	return &Policer{
		cfg: c,
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index 4e17e98a8..049c33753 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -4,6 +4,7 @@ import (
	"bytes"
	"context"
	"errors"
+	"slices"
	"sort"
	"testing"
	"time"
@@ -36,10 +37,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
	// Container source and bury function
	buryCh := make(chan oid.Address)
	containerSrc := containerSrc{
-		get: func(id cid.ID) (*container.Container, error) {
+		get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
			return nil, new(apistatus.ContainerNotFound)
		},
-		deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+		deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
			return &container.DelInfo{}, nil
		},
	}
@@ -78,6 +79,7 @@ func TestProcessObject(t *testing.T) {
		maintenanceNodes []int
		wantRemoveRedundant bool
		wantReplicateTo []int
+		headResult map[int]error
		ecInfo *objectcore.ECInfo
	}{
		{
@@ -127,7 +129,7 @@
			nodeCount: 2,
			policy: `REP 2 REP 2`,
			placement: [][]int{{0, 1}, {0, 1}},
-			wantReplicateTo: []int{1, 1}, // is this actually good?
+			wantReplicateTo: []int{1},
		},
		{
			desc: "lock object must be replicated to all nodes",
@@ -145,6 +147,14 @@
			objHolders: []int{1},
			maintenanceNodes: []int{2},
		},
+		{
+			desc: "preserve local copy when node responds with MAINTENANCE",
+			nodeCount: 3,
+			policy: `REP 2`,
+			placement: [][]int{{1, 2}},
+			objHolders: []int{1},
+			headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
+		},
		{
			desc: "lock object must be replicated to all EC nodes",
			objType: objectSDK.TypeLock,
@@ -161,6 +171,14 @@
			placement: [][]int{{0, 1, 2}},
			wantReplicateTo: []int{1, 2},
		},
+		{
+			desc: "do not remove local copy when MAINTENANCE status is cached",
+			objType: objectSDK.TypeRegular,
+			nodeCount: 3,
+			policy: `REP 1 REP 1`,
+			placement: [][]int{{1, 2}, {1, 0}},
+			headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
+		},
	}
	for i := range tests {
@@ -204,11 +222,14 @@
			t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
			return nil, errors.New("unexpected object head")
		}
-		for _, i := range ti.objHolders {
-			if index == i {
-				return nil, nil
+		if ti.headResult != nil {
+			if err, ok := ti.headResult[index]; ok {
+				return nil, err
			}
		}
+		if slices.Contains(ti.objHolders, index) {
+			return nil, nil
+		}
		return nil, new(apistatus.ObjectNotFound)
	}
@@ -217,14 +238,14 @@
	cnr.Value.Init()
	cnr.Value.SetPlacementPolicy(policy)
	containerSrc := containerSrc{
-		get: func(id cid.ID) (*container.Container, error) {
+		get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
			if id.Equals(addr.Container()) {
				return cnr, nil
			}
			t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container())
			return nil, new(apistatus.ContainerNotFound)
		},
-		deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+		deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
			return &container.DelInfo{}, nil
		},
	}
@@ -282,10 +303,10 @@ func TestProcessObjectError(t *testing.T) {
	cnr := &container.Container{}
	cnr.Value.Init()
	source := containerSrc{
-		get: func(id cid.ID) (*container.Container, error) {
+		get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
			return nil, new(apistatus.ContainerNotFound)
		},
-		deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+		deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
			return nil, new(apistatus.ContainerNotFound)
		},
	}
@@ -330,10 +351,10 @@ func TestIteratorContract(t *testing.T) {
	}
	containerSrc := containerSrc{
-		get: func(id cid.ID) (*container.Container, error) {
+		get: func(ctx context.Context, id cid.ID) (*container.Container, error) {
			return nil, new(apistatus.ContainerNotFound)
		},
-		deletionInfo: func(id cid.ID) (*container.DelInfo, error) {
+		deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
			return &container.DelInfo{}, nil
		},
	}
@@ -422,18 +443,22 @@ func (it *sliceKeySpaceIterator) Rewind() {
}
type containerSrc struct {
-	get func(id cid.ID) (*container.Container, error)
-	deletionInfo func(id cid.ID) (*container.DelInfo, error)
+	get func(ctx context.Context, id cid.ID) (*container.Container, error)
+	deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error)
}
-func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) }
+func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
+	return f.get(ctx, id)
+}
-func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) }
+func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
+	return f.deletionInfo(ctx, id)
+}
// placementBuilderFunc is a placement.Builder backed by a function
type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
-func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
+func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
	return f(c, o, p)
}
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 80a87ade9..635a5683b 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -7,7 +7,9 @@ import (
	"time"
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+	"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"go.uber.org/zap"
)
@@ -18,6 +20,7 @@ func (p *Policer) Run(ctx context.Context) {
}
func (p *Policer) shardPolicyWorker(ctx context.Context) {
+	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String())
	for {
		select {
		case <-ctx.Done():
@@ -61,7 +64,7 @@
			if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
				p.log.Error(ctx, logs.PolicerUnableToProcessObj,
					zap.Stringer("object", addr.Address),
-					zap.String("error", err.Error()))
+					zap.Error(err))
			}
			p.cache.Add(addr.Address, time.Now())
			p.objsInWork.remove(addr.Address)
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 2120312f6..8c6f0df06 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -6,7 +6,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"go.opentelemetry.io/otel/attribute"
@@ -45,8 +44,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
	if err != nil {
		p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage,
			zap.Stringer("object", task.Addr),
-			zap.Error(err),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Error(err))
		return
	}
@@ -65,7 +63,6 @@
	log := p.log.With(
		zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])),
		zap.Stringer("object", task.Addr),
-		zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
	)
	callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)
@@ -76,7 +73,7 @@
	if err != nil {
		log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
-			zap.String("error", err.Error()),
+			zap.Error(err),
		)
	} else {
		log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
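The pull.go hunk that follows swaps the callback-based IterateNetworkEndpoints for slices.Collect over the SDK's iterator-returning NetworkEndpoints. For readers new to Go 1.23 range-over-func iterators, a standalone sketch of the same pattern (toy values, unrelated to the SDK):

	import (
		"iter"
		"slices"
	)

	func endpoints() iter.Seq[string] { // iter.Seq[T] is func(yield func(T) bool)
		return func(yield func(string) bool) {
			_ = yield("addr1") && yield("addr2") // stop early if yield returns false
		}
	}

	all := slices.Collect(endpoints()) // []string{"addr1", "addr2"}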
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index 5ce929342..216fe4919 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -3,12 +3,12 @@ package replicator
import (
	"context"
	"errors"
+	"slices"
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"go.opentelemetry.io/otel/attribute"
@@ -43,23 +43,17 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
		if err == nil {
			break
		}
-		var endpoints []string
-		node.IterateNetworkEndpoints(func(s string) bool {
-			endpoints = append(endpoints, s)
-			return false
-		})
+		endpoints := slices.Collect(node.NetworkEndpoints())
		p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
			zap.Stringer("object", task.Addr),
			zap.Error(err),
-			zap.Strings("endpoints", endpoints),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Strings("endpoints", endpoints))
	}
	if obj == nil {
		p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage,
			zap.Stringer("object", task.Addr),
-			zap.Error(errFailedToGetObjectFromAnyNode),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Error(errFailedToGetObjectFromAnyNode))
		return
	}
@@ -67,7 +61,6 @@
	if err != nil {
		p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
			zap.Stringer("object", task.Addr),
-			zap.Error(err),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Error(err))
	}
}
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index 489f66ae5..bcad8471d 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -7,7 +7,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
-	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
@@ -33,8 +32,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
	if task.Obj == nil {
		p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
			zap.Stringer("object", task.Addr),
-			zap.Error(errObjectNotDefined),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Error(errObjectNotDefined))
		return
	}
@@ -42,7 +40,6 @@
	if err != nil {
		p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage,
			zap.Stringer("object", task.Addr),
-			zap.Error(err),
-			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+			zap.Error(err))
	}
}
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index 6910fa5af..a940cef37 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -7,7 +7,6 @@ import (
	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
	getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Replicator represents the utility that replicates @@ -45,8 +44,6 @@ func New(opts ...Option) *Replicator { opts[i](c) } - c.log = c.log.With(zap.String("component", "Object Replicator")) - return &Replicator{ cfg: c, } diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index 12b221613..f0591de71 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -33,10 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug(ctx, logs.ServingRequest, - zap.String("component", "SessionService"), - zap.String("request", "Create"), - ) + s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create")) respBody, err := s.exec.Create(ctx, req.GetBody()) if err != nil { diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index d312ea0ea..132d62445 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) { // enable encryption if it // was configured so if cfg.privateKey != nil { - rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8) + rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8) cfg.privateKey.D.FillBytes(rawKey) c, err := aes.NewCipher(rawKey) diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go index d531b25cb..423e579d7 100644 --- a/pkg/services/session/storage/temporary/executor.go +++ b/pkg/services/session/storage/temporary/executor.go @@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) s.mtx.Lock() s.tokens[key{ tokenID: base58.Encode(uidBytes), - ownerID: base58.Encode(id.WalletBytes()), + ownerID: id.EncodeToString(), }] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration()) s.mtx.Unlock() diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go index 9ae9db9dc..c9da6b842 100644 --- a/pkg/services/session/storage/temporary/storage.go +++ b/pkg/services/session/storage/temporary/storage.go @@ -41,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken s.mtx.RLock() t := s.tokens[key{ tokenID: base58.Encode(tokenID), - ownerID: base58.Encode(ownerID.WalletBytes()), + ownerID: ownerID.EncodeToString(), }] s.mtx.RUnlock() diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 606044f8e..58757ff6d 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -22,7 +22,7 @@ import ( ) func (s *Service) newAPERequest(ctx context.Context, namespace string, - cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) (aperequest.Request, error) { schemaMethod, err := converter.SchemaMethodFromACLOperation(operation) if err != nil { @@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()), nativeschema.PropertyKeyActorRole: schemaRole, } - reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey) + 
reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey) if err != nil { return aperequest.Request{}, err } @@ -53,15 +53,19 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) } + resProps := map[string]string{ + nativeschema.ProperyKeyTreeID: treeID, + } + return aperequest.NewRequest( schemaMethod, - aperequest.NewResource(resourceName, make(map[string]string)), + aperequest.NewResource(resourceName, resProps), reqProps, ), nil } func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, - container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) error { namespace := "" cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns") @@ -69,12 +73,12 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, namespace = cntNamespace } - request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey) + request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey) if err != nil { return fmt.Errorf("failed to create ape request: %w", err) } - return s.apeChecker.CheckAPE(checkercore.CheckPrm{ + return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{ Request: request, Namespace: namespace, Container: cid, @@ -85,11 +89,11 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { +func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey) if err != nil { return reqProps, err } diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go index 3f94925b5..7b209fd47 100644 --- a/pkg/services/tree/ape_test.go +++ b/pkg/services/tree/ape_test.go @@ -37,7 +37,7 @@ type frostfsIDProviderMock struct { subjectsExtended map[util.Uint160]*client.SubjectExtended } -func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -45,7 +45,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -107,6 +107,45 @@ func TestCheckAPE(t *testing.T) { cid := cid.ID{} _ = cid.DecodeString(containerID) + t.Run("treeID rule", func(t *testing.T) { + los := 
inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.QuotaLimitReached, + Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindResource, + Key: nativeschema.ProperyKeyTreeID, + Value: versionTreeID, + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey()) + + var chErr *checkercore.ChainRouterError + require.ErrorAs(t, err, &chErr) + require.Equal(t, chain.QuotaLimitReached, chErr.Status()) + }) + t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { los := inmemory.NewInmemoryLocalStorage() mcs := inmemory.NewInmemoryMorphRuleChainStorage() @@ -152,7 +191,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) @@ -201,7 +240,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) } diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index ac80d0e4c..a11700771 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -10,12 +10,9 @@ import ( internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" ) type clientCache struct { @@ -51,7 +48,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { c.Lock() - ccInt, ok := c.LRU.Get(netmapAddr) + ccInt, ok := c.Get(netmapAddr) c.Unlock() if ok { @@ -69,14 +66,19 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl } } - cc, err := c.dialTreeService(ctx, netmapAddr) + var netAddr network.Address + if err := netAddr.FromString(netmapAddr); err != nil { + return nil, err + } + + cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) lastTry := time.Now() c.Lock() if err != nil { - c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) } else { - c.LRU.Add(netmapAddr, cacheItem{cc: cc, 
lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) } c.Unlock() @@ -86,48 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl return NewTreeServiceClient(cc), nil } - -func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) { - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - opts := []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), - ), - grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), - tracing.NewStreamClientInterceptor(), - ), - grpc.WithContextDialer(c.ds.GrpcContextDialer()), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - } - - if !netAddr.IsTLSEnabled() { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, c.key); err != nil { - return nil, err - } - - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - defer cancel() - // perform some request to check connection - if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { - _ = cc.Close() - return nil, err - } - return cc, nil -} diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go index 435257550..c641a21a2 100644 --- a/pkg/services/tree/container.go +++ b/pkg/services/tree/container.go @@ -2,6 +2,7 @@ package tree import ( "bytes" + "context" "crypto/sha256" "fmt" "sync" @@ -32,13 +33,13 @@ type containerCacheItem struct { const defaultContainerCacheSize = 10 // getContainerNodes returns nodes in the container and a position of local key in the list. 
-func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { - nm, err := s.nmSource.GetNetMap(0) +func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { + nm, err := s.nmSource.GetNetMap(ctx, 0) if err != nil { return nil, -1, fmt.Errorf("can't get netmap: %w", err) } - cnr, err := s.cnrSource.Get(cid) + cnr, err := s.cnrSource.Get(ctx, cid) if err != nil { return nil, -1, fmt.Errorf("can't get container: %w", err) } diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go index 0f0e4ee57..07503f8c3 100644 --- a/pkg/services/tree/metrics.go +++ b/pkg/services/tree/metrics.go @@ -6,6 +6,7 @@ type MetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type defaultMetricsRegister struct{} @@ -13,3 +14,4 @@ type defaultMetricsRegister struct{} func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {} +func (defaultMetricsRegister) AddOperation(string, string) {} diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index a3f488009..56cbcc081 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -1,7 +1,9 @@ package tree import ( + "context" "crypto/ecdsa" + "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -18,12 +20,12 @@ import ( type ContainerSource interface { container.Source - DeletionInfo(cid.ID) (*container.DelInfo, error) + DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) // List must return list of all the containers in the FrostFS network // at the moment of a call and any error that does not allow fetching // container information. - List() ([]cid.ID, error) + List(ctx context.Context) ([]cid.ID, error) } type cfg struct { @@ -40,7 +42,7 @@ type cfg struct { replicatorWorkerCount int replicatorTimeout time.Duration containerCacheSize int - authorizedKeys [][]byte + authorizedKeys atomic.Pointer[[][]byte] syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage @@ -146,10 +148,7 @@ func WithMetrics(v MetricsRegister) Option { // keys that have rights to use Tree service. 
func WithAuthorizedKeys(keys keys.PublicKeys) Option {
	return func(c *cfg) {
-		c.authorizedKeys = nil
-		for _, key := range keys {
-			c.authorizedKeys = append(c.authorizedKeys, key.Bytes())
-		}
+		c.authorizedKeys.Store(fromPublicKeys(keys))
	}
}
diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go
new file mode 100644
index 000000000..8f21686df
--- /dev/null
+++ b/pkg/services/tree/qos.go
@@ -0,0 +1,101 @@
+package tree
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+)
+
+var _ TreeServiceServer = (*ioTagAdjust)(nil)
+
+type AdjustIOTag interface {
+	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
+}
+
+type ioTagAdjust struct {
+	s TreeServiceServer
+	a AdjustIOTag
+}
+
+func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer {
+	return &ioTagAdjust{
+		s: s,
+		a: a,
+	}
+}
+
+func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.Add(ctx, req)
+}
+
+func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.AddByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.Apply(ctx, req)
+}
+
+func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.GetNodeByPath(ctx, req)
+}
+
+func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error {
+	ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+	return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{
+		sender:       srv,
+		ServerStream: srv,
+		ctxF:         func() context.Context { return ctx },
+	})
+}
+
+func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error {
+	ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey())
+	return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{
+		sender:       srv,
+		ServerStream: srv,
+		ctxF:         func() context.Context { return ctx },
+	})
+}
+
+func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.Healthcheck(ctx, req)
+}
+
+func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.Move(ctx, req)
+}
+
+func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.Remove(ctx, req)
+}
+
+func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) {
+	ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey())
+	return i.s.TreeList(ctx, req)
+}
+
+type qosSend[T any] interface {
+	Send(T) error
+}
+
+type qosServerWrapper[T any] struct {
+	grpc.ServerStream
+	sender qosSend[T]
+	ctxF   func() context.Context
+}
+
+func (w *qosServerWrapper[T]) Send(resp T) error {
+	return w.sender.Send(resp)
+}
+
+func (w *qosServerWrapper[T]) Context() context.Context {
+	return w.ctxF()
+}
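To see where this wrapper sits, a minimal wiring sketch (illustrative only: the service constructor and the adjuster source are assumptions; NewIOTagAdjustServer is from this patch and RegisterTreeServiceServer is the usual generated gRPC registration function):

	var impl TreeServiceServer = newTreeService() // hypothetical constructor
	var adj AdjustIOTag = getQoSAdjuster()        // assumed to come from the node's QoS setup
	RegisterTreeServiceServer(grpcServer, NewIOTagAdjustServer(impl, adj))

Unary handlers re-tag the context in place before delegating; streaming handlers wrap the server stream in qosServerWrapper so the inner handler observes the adjusted context through the overridden Context method.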
b/pkg/services/tree/redirect.go index 416a0fafe..647f8cb30 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -6,7 +6,6 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" @@ -20,8 +19,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request") func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { var resp *Resp var outErr error - err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = callback(c, ctx, req) + err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = callback(c, fCtx, req) return true }) if err != nil { @@ -32,7 +31,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. -func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { +func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { for _, n := range cntNodes { if bytes.Equal(n.PublicKey(), s.rawPub) { return nil @@ -42,25 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo var called bool for _, n := range cntNodes { var stop bool - n.IterateNetworkEndpoints(func(endpoint string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false + for endpoint := range n.NetworkEndpoints() { + stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { + called = true + return f(fCtx, c) + }) + if called { + break } - - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - - called = true - stop = f(c) - return true - }) + } if stop { return nil } @@ -70,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo } return nil } + +func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + + c, err := s.cache.get(ctx, endpoint) + if err != nil { + return false + } + + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) + return f(ctx, c) +} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 0c5bde078..ee40884eb 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - 
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -59,7 +58,7 @@ func (s *Service) localReplicationWorker(ctx context.Context) { err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false) if err != nil { s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation, - zap.String("err", err.Error())) + zap.Error(err)) } span.End() } @@ -90,41 +89,23 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req var lastErr error var lastAddr string - n.IterateNetworkEndpoints(func(addr string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - + for addr := range n.NetworkEndpoints() { lastAddr = addr - - c, err := s.cache.get(ctx, addr) - if err != nil { - lastErr = fmt.Errorf("can't create client: %w", err) - return false + lastErr = s.apply(ctx, n, addr, req) + if lastErr == nil { + break } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, lastErr = c.Apply(ctx, req) - cancel() - - return lastErr == nil - }) + } if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode, - zap.String("last_error", lastErr.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("last_error", lastErr.Error())) } else { s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("address", lastAddr), - zap.String("key", hex.EncodeToString(n.PublicKey())), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("key", hex.EncodeToString(n.PublicKey()))) } s.metrics.AddReplicateTaskDuration(time.Since(start), false) return lastErr @@ -133,6 +114,26 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req return nil } +func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + + c, err := s.cache.get(ctx, addr) + if err != nil { + return fmt.Errorf("can't create client: %w", err) + } + + ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) + _, err = c.Apply(ctx, req) + cancel() + return err +} + func (s *Service) replicateLoop(ctx context.Context) { for range s.replicatorWorkerCount { go s.replicationWorker(ctx) @@ -152,10 +153,10 @@ func (s *Service) replicateLoop(ctx context.Context) { return case op := <-s.replicateCh: start := time.Now() - err := s.replicate(op) + err := s.replicate(ctx, op) if err != nil { s.log.Error(ctx, logs.TreeErrorDuringReplication, - zap.String("err", err.Error()), + zap.Error(err), zap.Stringer("cid", op.cid), zap.String("treeID", op.treeID)) } @@ -164,14 +165,14 @@ func (s *Service) replicateLoop(ctx context.Context) { } } -func (s *Service) replicate(op movePair) error { +func (s *Service) replicate(ctx context.Context, op movePair) error { req := newApplyRequest(&op) err := SignMessage(req, s.key) if err != nil { return fmt.Errorf("can't sign data: 
%w", err) } - nodes, localIndex, err := s.getContainerNodes(op.cid) + nodes, localIndex, err := s.getContainerNodes(ctx, op.cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -205,7 +206,7 @@ func newApplyRequest(op *movePair) *ApplyRequest { TreeId: op.treeID, Operation: &LogMove{ ParentId: op.op.Parent, - Meta: op.op.Meta.Bytes(), + Meta: op.op.Bytes(), ChildId: op.op.Child, }, }, diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 2df3c08e6..3994d6973 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -9,12 +9,15 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -57,6 +60,7 @@ func New(opts ...Option) *Service { s.replicatorTimeout = defaultReplicatorSendTimeout s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} + s.authorizedKeys.Store(&[][]byte{}) for i := range opts { opts[i](&s.cfg) @@ -83,6 +87,7 @@ func New(opts ...Option) *Service { // Start starts the service. func (s *Service) Start(ctx context.Context) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) @@ -102,6 +107,7 @@ func (s *Service) Shutdown() { } func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -113,12 +119,12 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -145,6 +151,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error } func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -156,12 +163,12 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -200,6 +207,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP } func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + defer s.metrics.AddOperation("Remove", 
qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -211,12 +219,12 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -244,6 +252,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon // Move applies client operation to the specified tree and pushes in queue // for replication on other nodes. func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -255,12 +264,12 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -287,6 +296,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er } func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -298,12 +308,12 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -337,14 +347,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } else { var metaValue []KeyValue for _, kv := range m.Items { - for _, attr := range b.GetAttributes() { - if kv.Key == attr { - metaValue = append(metaValue, KeyValue{ - Key: kv.Key, - Value: kv.Value, - }) - break - } + if slices.Contains(b.GetAttributes(), kv.Key) { + metaValue = append(metaValue, KeyValue{ + Key: kv.Key, + Value: kv.Value, + }) } } x.Meta = metaValue @@ -360,6 +367,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { + defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -371,20 +379,20 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(srv.Context(), cid) if err != nil { return err } if pos < 0 { 
var cli TreeService_GetSubTreeClient var outErr error - err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(srv.Context(), req) + err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetSubTree(fCtx, req) return true }) if err != nil { @@ -406,7 +414,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS type stackItem struct { values []pilorama.MultiNodeInfo parent pilorama.MultiNode - last *string + last *pilorama.Cursor } func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { @@ -430,10 +438,8 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid } if ms == nil { ms = m.Items - } else { - if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") - } + } else if len(m.Items) != 1 { + return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") } ts = append(ts, m.Time) ps = append(ps, p) @@ -457,14 +463,13 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid break } - nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) + var err error + item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) if err != nil { return err } - item.values = nodes - item.last = last - if len(nodes) == 0 { + if len(item.values) == 0 { stack = stack[:len(stack)-1] continue } @@ -586,7 +591,8 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di } // Apply locally applies operation from the remote node to the tree. 
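// A self-contained sketch of the cursor-driven paging used by
// getSortedSubTree above: keep asking the store for the next batch, passing
// back the opaque cursor it returned, until a batch comes back empty.
// listPage and cursor are illustrative stand-ins, not the real pilorama API.
package main

import "fmt"

type cursor struct{ next int }

// listPage returns up to size items after c (nil c means "from the start").
func listPage(items []string, c *cursor, size int) ([]string, *cursor) {
	start := 0
	if c != nil {
		start = c.next
	}
	end := min(start+size, len(items))
	return items[start:end], &cursor{next: end}
}

func main() {
	all := []string{"a", "b", "c", "d", "e"}
	var last *cursor
	for {
		batch, next := listPage(all, last, 2)
		if len(batch) == 0 {
			break // drained, mirrors the stack-pop branch above
		}
		fmt.Println(batch)
		last = next
	}
}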
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) { +func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx)) err := verifyMessage(req) if err != nil { return nil, err @@ -599,7 +605,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e key := req.GetSignature().GetKey() - _, pos, _, err := s.getContainerInfo(cid, key) + _, pos, _, err := s.getContainerInfo(ctx, cid, key) if err != nil { return nil, err } @@ -630,6 +636,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e } func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -641,15 +648,15 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) return err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(srv.Context(), cid) if err != nil { return err } if pos < 0 { var cli TreeService_GetOpLogClient var outErr error - err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(srv.Context(), req) + err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetOpLog(fCtx, req) return true }) if err != nil { @@ -680,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) Body: &GetOpLogResponse_Body{ Operation: &LogMove{ ParentId: lm.Parent, - Meta: lm.Meta.Bytes(), + Meta: lm.Bytes(), ChildId: lm.Child, }, }, @@ -694,6 +701,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) } func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -713,7 +721,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -755,8 +763,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue { // getContainerInfo returns the list of container nodes, position in the container for the node // with pub key and total amount of nodes in all replicas. 
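// A hedged sketch of the per-RPC accounting pattern added above: every
// handler defers a single metrics call keyed by the method name and by the
// IO tag found in the request context. The context key and the counter type
// here are illustrative; the real MetricsRegister implementation lives
// elsewhere in the node.
package main

import (
	"context"
	"fmt"
)

type ctxTagKey struct{}

func ioTagFromContext(ctx context.Context) string {
	if v, ok := ctx.Value(ctxTagKey{}).(string); ok {
		return v
	}
	return "undefined"
}

type opCounter map[string]int

// AddOperation keeps one counter per (method, tag) pair.
func (c opCounter) AddOperation(method, tag string) {
	c[method+"/"+tag]++
}

func handle(ctx context.Context, m opCounter) {
	defer m.AddOperation("Add", ioTagFromContext(ctx)) // fires on every exit path
	// ... handler body ...
}

func main() {
	m := opCounter{}
	handle(context.WithValue(context.Background(), ctxTagKey{}, "client"), m)
	handle(context.Background(), m)
	fmt.Println(m) // map[Add/client:1 Add/undefined:1]
}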
-func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { - cntNodes, _, err := s.getContainerNodes(cid) +func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { + cntNodes, _, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, 0, 0, err } @@ -776,3 +784,15 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec return new(HealthcheckResponse), nil } + +func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { + s.authorizedKeys.Store(fromPublicKeys(newKeys)) +} + +func fromPublicKeys(keys keys.PublicKeys) *[][]byte { + buff := make([][]byte, len(keys)) + for i, k := range keys { + buff[i] = k.Bytes() + } + return &buff +} diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 4fd4a7e1e..8221a4546 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -9,8 +9,10 @@ import ( "fmt" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -36,7 +38,7 @@ var ( // Operation must be one of: // - 1. ObjectPut; // - 2. ObjectGet. -func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error { +func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { err := verifyMessage(req) if err != nil { return err @@ -47,7 +49,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return err } - cnr, err := s.cnrSource.Get(cid) + cnr, err := s.cnrSource.Get(ctx, cid) if err != nil { return fmt.Errorf("can't get container %s: %w", cid, err) } @@ -62,7 +64,22 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey) + if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { + return apeErr(err) + } + return nil +} + +func apeErr(err error) error { + var chRouterErr *checkercore.ChainRouterError + if !errors.As(err, &chRouterErr) { + errServerInternal := &apistatus.ServerInternal{} + apistatus.WriteInternalServerErr(errServerInternal, err) + return errServerInternal + } + errAccessDenied := &apistatus.ObjectAccessDenied{} + errAccessDenied.WriteReason(err.Error()) + return errAccessDenied } // Returns true iff the operation is read-only and request was signed @@ -78,8 +95,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { } key := sign.GetKey() - for i := range s.authorizedKeys { - if bytes.Equal(s.authorizedKeys[i], key) { + for _, currentKey := range *s.authorizedKeys.Load() { + if bytes.Equal(currentKey, key) { return true, nil } } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 7bc5002dc..8815c227f 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -31,6 +31,8 @@ import ( 
"github.com/stretchr/testify/require" ) +const versionTreeID = "version" + type dummyNetmapSource struct { netmap.Source } @@ -39,7 +41,7 @@ type dummySubjectProvider struct { subjects map[util.Uint160]client.SubjectExtended } -func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) { +func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { res := s.subjects[addr] return &client.Subject{ PrimaryKey: res.PrimaryKey, @@ -50,7 +52,7 @@ func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, er }, nil } -func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { +func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { res := s.subjects[addr] return &res, nil } @@ -65,7 +67,7 @@ func (s dummyEpochSource) CurrentEpoch() uint64 { type dummyContainerSource map[string]*containercore.Container -func (s dummyContainerSource) List() ([]cid.ID, error) { +func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) { res := make([]cid.ID, 0, len(s)) var cnr cid.ID @@ -81,7 +83,7 @@ func (s dummyContainerSource) List() ([]cid.ID, error) { return res, nil } -func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { +func (s dummyContainerSource) Get(ctx context.Context, id cid.ID) (*containercore.Container, error) { cnt, ok := s[id.String()] if !ok { return nil, errors.New("container not found") @@ -89,7 +91,7 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { return cnt, nil } -func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) { +func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) { return &containercore.DelInfo{}, nil } @@ -150,6 +152,7 @@ func TestMessageSign(t *testing.T) { apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } + s.cfg.authorizedKeys.Store(&[][]byte{}) rawCID1 := make([]byte, sha256.Size) cid1.Encode(rawCID1) @@ -168,26 +171,26 @@ func TestMessageSign(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRW) t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) cnr.Value.SetBasicACL(acl.Private) t.Run("extension disabled", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) t.Run("invalid key", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) }) t.Run("bearer", 
func(t *testing.T) { @@ -200,7 +203,7 @@ func TestMessageSign(t *testing.T) { t.Run("invalid bearer", func(t *testing.T) { req.Body.BearerToken = []byte{0xFF} require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer CID", func(t *testing.T) { @@ -209,7 +212,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer owner", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -217,7 +220,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer signature", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -229,20 +232,112 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bv2.StableMarshal(nil) require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + + t.Run("omit override within bt", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + }) + + t.Run("invalid override within bearer token", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + bt.SetImpersonate(true) + require.NoError(t, 
bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) }) t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate, but target user is still set", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) bt.SetImpersonate(true) + var reqSigner user.ID + user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey())) + + bt.ForUser(reqSigner) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate but invalid signer", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) require.NoError(t, bt.Sign(privs[1].PrivateKey)) req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -252,18 +347,18 @@ func TestMessageSign(t *testing.T) { t.Run("put and get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, 
req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("only get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("none", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) }) } @@ -282,6 +377,25 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token return b } +func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + b.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + }, + Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, + }) + + return b +} + +func testBearerTokenNoOverride() bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + return b +} + func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { ruleGet := chain.Rule{ Status: chain.Allow, diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index c48a312fb..af355639f 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -2,7 +2,9 @@ package tree import ( "context" + "crypto/ecdsa" "crypto/sha256" + "crypto/tls" "errors" "fmt" "io" @@ -13,6 +15,8 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -20,12 +24,15 @@ import ( metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -39,7 +46,7 @@ const defaultSyncWorkerCount = 20 // tree IDs from the other container nodes. Returns ErrNotInContainer if the node // is not included in the container. 
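// A hedged, self-contained sketch of the "verify before use" dialing that
// dialTreeService performs further below: open the connection, probe it once
// with a short-timeout request, and refuse to hand it out if the probe
// fails. dial, ping and conn are illustrative stand-ins; the real code sends
// a signed Healthcheck RPC over the fresh gRPC connection.
package main

import (
	"context"
	"fmt"
	"time"
)

type conn struct{ addr string }

func (c *conn) close() {}

func dial(addr string) (*conn, error) { return &conn{addr: addr}, nil }

// ping pretends to be a cheap round trip, honouring the caller's deadline.
func ping(ctx context.Context, c *conn) error {
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func dialVerified(ctx context.Context, addr string) (*conn, error) {
	c, err := dial(addr)
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	if err := ping(ctx, c); err != nil {
		c.close() // never leak a connection that failed its probe
		return nil, fmt.Errorf("healthcheck failed: %w", err)
	}
	return c, nil
}

func main() {
	c, err := dialVerified(context.Background(), "node:8080")
	fmt.Println(c != nil, err) // true <nil>
}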
func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { - nodes, pos, err := s.getContainerNodes(cid) + nodes, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -71,8 +78,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { var treesToSync []string var outErr error - err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool { - resp, outErr = c.TreeList(ctx, req) + err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = c.TreeList(fCtx, req) if outErr != nil { return false } @@ -112,7 +119,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { // SynchronizeTree tries to synchronize log starting from the last stored height. func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error { - nodes, pos, err := s.getContainerNodes(cid) + nodes, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -131,14 +138,9 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string } // mergeOperationStreams performs merge sort for node operation streams to one stream. -func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { +func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { defer close(merged) - ms := make([]*pilorama.Move, len(streams)) - for i := range streams { - ms[i] = <-streams[i] - } - // Merging different node streams shuffles incoming operations like that: // // x - operation from the stream A @@ -150,6 +152,15 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram // operation height from the stream B. This height is stored in minStreamedLastHeight. 
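// A minimal, self-contained sketch of the cancellation-aware channel
// plumbing introduced in this function and in startStream: every blocking
// send or receive is wrapped in a select against ctx.Done(), so a cancelled
// sync cannot wedge a goroutine on a full or empty channel.
package main

import (
	"context"
	"fmt"
)

// forward copies values from in to out until ctx is cancelled; both the
// receive and the send are guarded by ctx.Done().
func forward(ctx context.Context, in <-chan int, out chan<- int) error {
	for {
		var v int
		select {
		case v = <-in:
		case <-ctx.Done():
			return ctx.Err()
		}
		select {
		case out <- v:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	in, out := make(chan int), make(chan int)
	go forward(ctx, in, out)
	in <- 1
	fmt.Println(<-out) // 1
	cancel()           // forward unblocks and returns instead of hanging
}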
var minStreamedLastHeight uint64 = math.MaxUint64 + ms := make([]*pilorama.Move, len(streams)) + for i := range streams { + select { + case ms[i] = <-streams[i]: + case <-ctx.Done(): + return minStreamedLastHeight + } + } + for { var minTimeMoveTime uint64 = math.MaxUint64 minTimeMoveIndex := -1 @@ -164,7 +175,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram break } - merged <- ms[minTimeMoveIndex] + select { + case merged <- ms[minTimeMoveIndex]: + case <-ctx.Done(): + return minStreamedLastHeight + } height := ms[minTimeMoveIndex].Time if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil { minStreamedLastHeight = min(minStreamedLastHeight, height) @@ -176,7 +191,7 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, operationStream <-chan *pilorama.Move, -) uint64 { +) (uint64, error) { var prev *pilorama.Move var batch []*pilorama.Move for m := range operationStream { @@ -189,17 +204,17 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s if len(batch) == s.syncBatchSize { if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time + return batch[0].Time, err } batch = batch[:0] } } if len(batch) > 0 { if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time + return batch[0].Time, err } } - return math.MaxUint64 + return math.MaxUint64, nil } func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, @@ -232,10 +247,14 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, Parent: lm.GetParentId(), Child: lm.GetChildId(), } - if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { + if err := m.FromBytes(lm.GetMeta()); err != nil { return err } - opsCh <- m + select { + case opsCh <- m: + case <-ctx.Done(): + return ctx.Err() + } } if !errors.Is(err, io.EOF) { return err @@ -264,13 +283,14 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, merged := make(chan *pilorama.Move) var minStreamedLastHeight uint64 errGroup.Go(func() error { - minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged) + minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged) return nil }) var minUnappliedHeight uint64 errGroup.Go(func() error { - minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged) - return nil + var err error + minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged) + return err }) var allNodesSynced atomic.Bool @@ -279,27 +299,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, for i, n := range nodes { errGroup.Go(func() error { var nodeSynced bool - n.IterateNetworkEndpoints(func(addr string) bool { + for addr := range n.NetworkEndpoints() { var a network.Address if err := a.FromString(addr); err != nil { s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + continue } - cc, err := s.createConnection(a) + cc, err := dialTreeService(ctx, a, s.key, s.ds) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + continue } - defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { s.log.Warn(ctx, 
logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil - return true - }) + _ = cc.Close() + break + } close(nodeOperationStreams[i]) if !nodeSynced { allNodesSynced.Store(false) @@ -324,19 +344,60 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { - return grpc.NewClient(a.URIAddr(), +func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { + cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) + defer cancel() + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, key); err != nil { + return nil, err + } + + // perform some request to check connection + if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { + _ = cc.Close() + return nil, err + } + return cc, nil +} + +func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + host, isTLS, err := client.ParseURI(a.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + + defaultOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInteceptor(), + tracing_grpc.NewUnaryClientInterceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithTransportCredentials(creds), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - ) + grpc.WithDisableServiceConfig(), + } + + return grpc.NewClient(host, append(defaultOpts, opts...)...) } // ErrAlreadySyncing is returned when a service synchronization has already @@ -380,7 +441,7 @@ func (s *Service) syncLoop(ctx context.Context) { start := time.Now() - cnrs, err := s.cfg.cnrSource.List() + cnrs, err := s.cnrSource.List(ctx) if err != nil { s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) @@ -450,7 +511,7 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID continue } - existed, err := containerCore.WasRemoved(s.cnrSource, cnr) + existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr) if err != nil { s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted, zap.Stringer("cid", cnr), @@ -480,7 +541,7 @@ func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid. 
cnrsToSync := make([]cid.ID, 0, len(cnrs)) for _, cnr := range cnrs { - _, pos, err := s.getContainerNodes(cnr) + _, pos, err := s.getContainerNodes(ctx, cnr) if err != nil { s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go index 497d90554..87d419408 100644 --- a/pkg/services/tree/sync_test.go +++ b/pkg/services/tree/sync_test.go @@ -1,6 +1,7 @@ package tree import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -64,7 +65,7 @@ func Test_mergeOperationStreams(t *testing.T) { merged := make(chan *pilorama.Move, 1) min := make(chan uint64) go func() { - min <- mergeOperationStreams(nodeOpChans, merged) + min <- mergeOperationStreams(context.Background(), nodeOpChans, merged) }() var res []uint64 diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go index b4a31fd8d..6f114d45b 100644 --- a/pkg/util/ape/parser.go +++ b/pkg/util/ape/parser.go @@ -174,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) { case "deny": if !found { return apechain.AccessDenied, nil - } else if strings.EqualFold(expression, "QuotaLimitReached") { - return apechain.QuotaLimitReached, nil - } else { - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) } + if strings.EqualFold(expression, "QuotaLimitReached") { + return apechain.QuotaLimitReached, nil + } + return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) case "allow": if found { return 0, errUnknownStatusDetail @@ -261,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) { } else { if lexeme == "*" { return nativeschema.ResourceFormatAllContainers, nil - } else if lexeme == "/*" { + } else if lexeme == "/*" || lexeme == "root/*" { return nativeschema.ResourceFormatRootContainers, nil } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 { lexeme = lexeme[1:] diff --git a/pkg/util/ape/parser_test.go b/pkg/util/ape/parser_test.go index 21649fd24..c236c4603 100644 --- a/pkg/util/ape/parser_test.go +++ b/pkg/util/ape/parser_test.go @@ -43,6 +43,15 @@ func TestParseAPERule(t *testing.T) { Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, }, }, + { + name: "Valid rule for all containers in explicit root namespace", + rule: "allow Container.Put root/*", + expectRule: policyengine.Rule{ + Status: policyengine.Allow, + Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}}, + Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}}, + }, + }, { name: "Valid rule for all objects in root namespace and container", rule: "allow Object.Put /cid/*", diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go index 547c8d50b..66581878a 100644 --- a/pkg/util/attributes/parser_test.go +++ b/pkg/util/attributes/parser_test.go @@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) { mExp = mSrc } - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { v, ok := mExp[key] require.True(t, ok) require.Equal(t, value, v) delete(mExp, key) - }) + } require.Empty(t, mExp) } diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go index 923412a7f..2589ab786 100644 --- a/pkg/util/http/server.go +++ b/pkg/util/http/server.go @@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server { o(c) } - switch { - case c.shutdownTimeout <= 0: + 
if c.shutdownTimeout <= 0 { panicOnOptValue("shutdown timeout", c.shutdownTimeout) } diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go index b2942b52a..6337039a9 100644 --- a/pkg/util/keyer/dashboard.go +++ b/pkg/util/keyer/dashboard.go @@ -6,6 +6,7 @@ import ( "os" "text/tabwriter" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -104,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { func base58ToHex(data string) string { val, err := base58.Decode(data) - if err != nil { - panic("produced incorrect base58 value") - } + assert.NoError(err, "produced incorrect base58 value") return hex.EncodeToString(val) } diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go index 269e07d90..413b1d9aa 100644 --- a/pkg/util/logger/log.go +++ b/pkg/util/logger/log.go @@ -4,37 +4,32 @@ import ( "context" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" ) func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Debug(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Debug(msg, fields...) + l.z.Debug(msg, appendContext(ctx, fields...)...) } func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Info(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Info(msg, fields...) + l.z.Info(msg, appendContext(ctx, fields...)...) } func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Warn(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Warn(msg, fields...) + l.z.Warn(msg, appendContext(ctx, fields...)...) } func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Error(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Error(msg, fields...) + l.z.Error(msg, appendContext(ctx, fields...)...) +} + +func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + fields = append(fields, zap.String("trace_id", traceID)) + } + if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined { + fields = append(fields, zap.String("io_tag", ioTag)) + } + return fields } diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 19d3f1ed1..a1998cb1a 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -2,6 +2,7 @@ package logger import ( "fmt" + "time" "git.frostfs.info/TrueCloudLab/zapjournald" "github.com/ssgreg/journald" @@ -12,8 +13,10 @@ import ( // Logger represents a component // for writing messages to log. type Logger struct { - z *zap.Logger - lvl zap.AtomicLevel + z *zap.Logger + c zapcore.Core + t Tag + w bool } // Prm groups Logger's parameters. @@ -22,16 +25,8 @@ type Logger struct { // Parameters that have been connected to the Logger support its // configuration changing. // -// Passing Prm after a successful connection via the NewLogger, connects -// the Prm to a new instance of the Logger. -// -// See also Reload, SetLevelString. +// See also Logger.Reload, SetLevelString. 
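// A self-contained sketch of the field-enrichment helper above: pull
// whatever request-scoped metadata the context carries (here a fake trace
// ID) and append it to the caller's fields before logging. The key type and
// lookup are illustrative; the real helper reads the trace ID and IO tag
// via the tracing and frostfs-qos packages.
package main

import (
	"context"

	"go.uber.org/zap"
)

type traceKey struct{}

func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field {
	if id, ok := ctx.Value(traceKey{}).(string); ok && id != "" {
		fields = append(fields, zap.String("trace_id", id))
	}
	return fields
}

func main() {
	z, _ := zap.NewDevelopment()
	ctx := context.WithValue(context.Background(), traceKey{}, "abc123")
	// every log entry gets trace_id attached without the call sites knowing
	z.Info("hello", appendContext(ctx, zap.String("k", "v"))...)
}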
type Prm struct { - // link to the created Logger - // instance; used for a runtime - // reconfiguration - _log *Logger - // support runtime rereading level zapcore.Level @@ -43,6 +38,12 @@ type Prm struct { // PrependTimestamp specifies whether to prepend a timestamp in the log PrependTimestamp bool + + // Options for zap.Logger + Options []zap.Option + + // map of tag's bit masks to log level, overrides lvl + tl map[Tag]zapcore.Level } const ( @@ -72,20 +73,10 @@ func (p *Prm) SetDestination(d string) error { return nil } -// Reload reloads configuration of a connected instance of the Logger. -// Returns ErrLoggerNotConnected if no connection has been performed. -// Returns any reconfiguration error from the Logger directly. -func (p Prm) Reload() error { - if p._log == nil { - // incorrect logger usage - panic("parameters are not connected to any Logger") - } - - return p._log.reload(p) -} - -func defaultPrm() *Prm { - return new(Prm) +// SetTags parses list of tags with log level. +func (p *Prm) SetTags(tags [][]string) (err error) { + p.tl, err = parseTags(tags) + return err } // NewLogger constructs a new zap logger instance. Constructing with nil @@ -99,10 +90,7 @@ func defaultPrm() *Prm { // - ISO8601 time encoding. // // Logger records a stack trace for all messages at or above fatal level. -func NewLogger(prm *Prm) (*Logger, error) { - if prm == nil { - prm = defaultPrm() - } +func NewLogger(prm Prm) (*Logger, error) { switch prm.dest { case DestinationUndefined, DestinationStdout: return newConsoleLogger(prm) @@ -113,11 +101,9 @@ func NewLogger(prm *Prm) (*Logger, error) { } } -func newConsoleLogger(prm *Prm) (*Logger, error) { - lvl := zap.NewAtomicLevelAt(prm.level) - +func newConsoleLogger(prm Prm) (*Logger, error) { c := zap.NewProductionConfig() - c.Level = lvl + c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -129,26 +115,23 @@ func newConsoleLogger(prm *Prm) (*Logger, error) { c.EncoderConfig.TimeKey = "" } - lZap, err := c.Build( + opts := []zap.Option{ zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1), - ) + } + opts = append(opts, prm.Options...) + lZap, err := c.Build(opts...) 
if err != nil { return nil, err } - - l := &Logger{z: lZap, lvl: lvl} - prm._log = l + l := &Logger{z: lZap, c: lZap.Core()} + l = l.WithTag(TagMain) return l, nil } -func newJournaldLogger(prm *Prm) (*Logger, error) { - lvl := zap.NewAtomicLevelAt(prm.level) - +func newJournaldLogger(prm Prm) (*Logger, error) { c := zap.NewProductionConfig() - c.Level = lvl - c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook } @@ -161,36 +144,100 @@ encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields) - core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields) + core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields) coreWithContext := core.With([]zapcore.Field{ zapjournald.SyslogFacility(zapjournald.LogDaemon), zapjournald.SyslogIdentifier(), zapjournald.SyslogPid(), }) - lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) - - l := &Logger{z: lZap, lvl: lvl} - prm._log = l + var samplerOpts []zapcore.SamplerOption + if c.Sampling.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook)) + } + samplingCore := zapcore.NewSamplerWithOptions( + coreWithContext, + time.Second, + c.Sampling.Initial, + c.Sampling.Thereafter, + samplerOpts..., + ) + opts := []zap.Option{ + zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), + zap.AddCallerSkip(1), + } + opts = append(opts, prm.Options...) + lZap := zap.New(samplingCore, opts...) + l := &Logger{z: lZap, c: lZap.Core()} + l = l.WithTag(TagMain) return l, nil } -func (l *Logger) reload(prm Prm) error { - l.lvl.SetLevel(prm.level) - return nil -} - -func (l *Logger) WithOptions(options ...zap.Option) { - l.z = l.z.WithOptions(options...) -} - +// With creates a child logger with new fields; the parent logger is not affected. +// Panics if the tag is unset. func (l *Logger) With(fields ...zap.Field) *Logger { - return &Logger{z: l.z.With(fields...)} + if l.t == 0 { + panic("tag is unset") + } + c := *l + c.z = l.z.With(fields...) + // mark that With has been called on this logger + c.w = true + return &c +} + +type core struct { + c zapcore.Core + l zap.AtomicLevel +} + +func (c *core) Enabled(lvl zapcore.Level) bool { + return c.l.Enabled(lvl) +} + +func (c *core) With(fields []zapcore.Field) zapcore.Core { + clone := *c + clone.c = clone.c.With(fields) + return &clone +} + +func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + return c.c.Check(e, ce) +} + +func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error { + return c.c.Write(e, fields) +} + +func (c *core) Sync() error { + return c.c.Sync() +} + +// WithTag is equivalent to calling [NewLogger] with the same parameters for the current logger. +// Panics if an unsupported tag is provided.
+// WithTag returns a logger bound to the given tag, equivalent to calling
+// [NewLogger] with the same parameters for the current logger.
+// Panics if an unsupported tag is provided.
+func (l *Logger) WithTag(tag Tag) *Logger {
+	if tag == 0 || tag > Tag(len(_Tag_index)-1) {
+		panic("unsupported tag " + tag.String())
+	}
+	if l.w {
+		panic("unsupported operation for the logger's state")
+	}
+	c := *l
+	c.t = tag
+	c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core {
+		return &core{
+			c: l.c.With([]zap.Field{zap.String("tag", tag.String())}),
+			l: tagToLogLevel[tag],
+		}
+	}))
+	return &c
 }
 
 func NewLoggerWrapper(z *zap.Logger) *Logger {
 	return &Logger{
 		z: z.WithOptions(zap.AddCallerSkip(1)),
+		t: TagMain,
+		c: z.Core(),
 	}
 }
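Note: taken together, the intended call order appears to be tag first, fields second; the w flag enforces it. A hypothetical wiring sketch (the shard_id field and variable names are illustrative):

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

func main() {
	var prm logger.Prm
	log, err := logger.NewLogger(prm) // bound to TagMain by the constructor
	if err != nil {
		panic(err)
	}

	// Tag first, then fields: each subsystem gets its own level knob.
	shardLog := log.WithTag(logger.TagShard).With(zap.String("shard_id", "0"))
	shardLog.Info(context.Background(), "shard initialized")

	// The reverse order panics: With marks the logger as derived,
	// and WithTag refuses to operate on derived loggers.
	// log.With(zap.String("k", "v")).WithTag(logger.TagShard) // panics
}
```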
diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go
new file mode 100644
index 000000000..b867ee6cc
--- /dev/null
+++ b/pkg/util/logger/logger_test.go
@@ -0,0 +1,118 @@
+package logger
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"go.uber.org/zap/zaptest/observer"
+)
+
+func BenchmarkLogger(b *testing.B) {
+	ctx := context.Background()
+	m := map[string]Prm{}
+
+	prm := Prm{}
+	require.NoError(b, prm.SetLevelString("debug"))
+	m["logging enabled"] = prm
+
+	prm = Prm{}
+	require.NoError(b, prm.SetLevelString("error"))
+	m["logging disabled"] = prm
+
+	prm = Prm{}
+	require.NoError(b, prm.SetLevelString("error"))
+	require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}}))
+	m["logging enabled via tags"] = prm
+
+	prm = Prm{}
+	require.NoError(b, prm.SetLevelString("debug"))
+	require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}}))
+	m["logging disabled via tags"] = prm
+
+	for k, v := range m {
+		b.Run(k, func(b *testing.B) {
+			logger, err := createLogger(v)
+			require.NoError(b, err)
+			UpdateLevelForTags(v)
+			b.ResetTimer()
+			b.ReportAllocs()
+			for range b.N {
+				logger.Info(ctx, "test info")
+			}
+		})
+	}
+}
+
+type testCore struct {
+	core zapcore.Core
+}
+
+func (c *testCore) Enabled(lvl zapcore.Level) bool {
+	return c.core.Enabled(lvl)
+}
+
+func (c *testCore) With(fields []zapcore.Field) zapcore.Core {
+	c.core = c.core.With(fields)
+	return c
+}
+
+func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+	return ce.AddCore(e, c)
+}
+
+func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error {
+	return nil
+}
+
+func (c *testCore) Sync() error {
+	return c.core.Sync()
+}
+
+func createLogger(prm Prm) (*Logger, error) {
+	prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
+		tc := testCore{core: core}
+		return &tc
+	})}
+	return NewLogger(prm)
+}
+
+func TestLoggerOutput(t *testing.T) {
+	obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel))
+
+	prm := Prm{}
+	require.NoError(t, prm.SetLevelString("debug"))
+	prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core {
+		return obs
+	})}
+	loggerMain, err := NewLogger(prm)
+	require.NoError(t, err)
+	UpdateLevelForTags(prm)
+
+	loggerMainWith := loggerMain.With(zap.String("key", "value"))
+
+	require.Panics(t, func() {
+		loggerMainWith.WithTag(TagShard)
+	})
+	loggerShard := loggerMain.WithTag(TagShard)
+	loggerShard = loggerShard.With(zap.String("key1", "value1"))
+
+	loggerMorph := loggerMain.WithTag(TagMorph)
+	loggerMorph = loggerMorph.With(zap.String("key2", "value2"))
+
+	ctx := context.Background()
+	loggerMain.Debug(ctx, "main")
+	loggerMainWith.Debug(ctx, "main with")
+	loggerShard.Debug(ctx, "shard")
+	loggerMorph.Debug(ctx, "morph")
+
+	require.Len(t, logs.All(), 4)
+	require.Len(t, logs.FilterFieldKey("key").All(), 1)
+	require.Len(t, logs.FilterFieldKey("key1").All(), 1)
+	require.Len(t, logs.FilterFieldKey("key2").All(), 1)
+	require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2)
+	require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1)
+	require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1)
+}
diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result
new file mode 100644
index 000000000..612fa2967
--- /dev/null
+++ b/pkg/util/logger/logger_test.result
@@ -0,0 +1,46 @@
+goos: linux
+goarch: amd64
+pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger
+cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
+BenchmarkLogger/logging_enabled-8            10000     1156 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1124 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1106 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1096 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1071 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1081 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1074 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1134 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1123 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled-8            10000     1144 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.15 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.54 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.22 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.22 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     17.01 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.31 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.61 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.17 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     16.26 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled-8           10000     21.02 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1146 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1086 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1113 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1157 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1069 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1073 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1096 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1092 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1060 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_enabled_via_tags-8   10000     1153 ns/op     240 B/op     1 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.23 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.39 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.47 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.62 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.53 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.53 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.74 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.20 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     17.06 ns/op      0 B/op     0 allocs/op
+BenchmarkLogger/logging_disabled_via_tags-8  10000     16.60 ns/op      0 B/op     0 allocs/op
+PASS
+ok      git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger    0.260s
diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go
new file mode 100644
index 000000000..1b98f2e62
--- /dev/null
+++ b/pkg/util/logger/tag_string.go
@@ -0,0 +1,43 @@
+// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT.
+
+package logger
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[TagMain-1]
+	_ = x[TagMorph-2]
+	_ = x[TagGrpcSvc-3]
+	_ = x[TagIr-4]
+	_ = x[TagProcessor-5]
+	_ = x[TagEngine-6]
+	_ = x[TagBlobovnicza-7]
+	_ = x[TagBlobovniczaTree-8]
+	_ = x[TagBlobstor-9]
+	_ = x[TagFSTree-10]
+	_ = x[TagGC-11]
+	_ = x[TagShard-12]
+	_ = x[TagWriteCache-13]
+	_ = x[TagDeleteSvc-14]
+	_ = x[TagGetSvc-15]
+	_ = x[TagSearchSvc-16]
+	_ = x[TagSessionSvc-17]
+	_ = x[TagTreeSvc-18]
+	_ = x[TagPolicer-19]
+	_ = x[TagReplicator-20]
+}
+
+const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator"
+
+var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148}
+
+func (i Tag) String() string {
+	i -= 1
+	if i >= Tag(len(_Tag_index)-1) {
+		return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")"
+	}
+	return _Tag_name[_Tag_index[i]:_Tag_index[i+1]]
+}
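Note: the -linecomment names generated above, not the Go identifiers, are what operators put in configuration; tags.go below wires them to runtime levels. A sketch of the expected configuration flow (the [][]string shape and comma-separated tag lists follow parseTags below; the concrete values are illustrative):

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

func main() {
	var prm logger.Prm
	if err := prm.SetLevelString("info"); err != nil { // common level
		panic(err)
	}
	// Each entry is {tags, level}: the first element may list several
	// comma-separated tag names, the second must parse as a zapcore.Level.
	err := prm.SetTags([][]string{
		{"shard,writecache", "debug"}, // chatty subsystems
		{"policer", "warn"},           // a quieter one
	})
	if err != nil {
		panic(err)
	}

	log, err := logger.NewLogger(prm)
	if err != nil {
		panic(err)
	}
	logger.UpdateLevelForTags(prm) // levels do not change until this is called
	log.WithTag(logger.TagShard).Debug(context.Background(), "visible at debug")
}
```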
diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go
new file mode 100644
index 000000000..a5386707e
--- /dev/null
+++ b/pkg/util/logger/tags.go
@@ -0,0 +1,94 @@
+package logger
+
+import (
+	"fmt"
+	"strings"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+//go:generate stringer -type Tag -linecomment
+
+type Tag uint8
+
+const (
+	_                  Tag = iota //
+	TagMain                       // main
+	TagMorph                      // morph
+	TagGrpcSvc                    // grpcsvc
+	TagIr                         // ir
+	TagProcessor                  // processor
+	TagEngine                     // engine
+	TagBlobovnicza                // blobovnicza
+	TagBlobovniczaTree            // blobovniczatree
+	TagBlobstor                   // blobstor
+	TagFSTree                     // fstree
+	TagGC                         // gc
+	TagShard                      // shard
+	TagWriteCache                 // writecache
+	TagDeleteSvc                  // deletesvc
+	TagGetSvc                     // getsvc
+	TagSearchSvc                  // searchsvc
+	TagSessionSvc                 // sessionsvc
+	TagTreeSvc                    // treesvc
+	TagPolicer                    // policer
+	TagReplicator                 // replicator
+
+	defaultLevel = zapcore.InfoLevel
+)
+
+var (
+	tagToLogLevel = map[Tag]zap.AtomicLevel{}
+	stringToTag   = map[string]Tag{}
+)
+
+func init() {
+	for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ {
+		tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel)
+		stringToTag[i.String()] = i
+	}
+}
+
+// parseTags returns:
+//   - a map (always instantiated) from tag to its custom log level;
+//   - an error, if one occurred (the map is then empty).
+func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) {
+	m := make(map[Tag]zapcore.Level)
+	if len(raw) == 0 {
+		return m, nil
+	}
+	for _, item := range raw {
+		str, level := item[0], item[1]
+		if len(level) == 0 {
+			// There is no need to parse tags without a level:
+			// the default log level will be used for them.
+			continue
+		}
+		var l zapcore.Level
+		err := l.UnmarshalText([]byte(level))
+		if err != nil {
+			return nil, err
+		}
+		tmp := strings.Split(str, ",")
+		for _, tagStr := range tmp {
+			tag, ok := stringToTag[strings.TrimSpace(tagStr)]
+			if !ok {
+				return nil, fmt.Errorf("unsupported tag %s", tagStr)
+			}
+			m[tag] = l
+		}
+	}
+	return m, nil
+}
+
+// UpdateLevelForTags sets the level of every tag: levels parsed from
+// Prm.SetTags override the common prm.level.
+func UpdateLevelForTags(prm Prm) {
+	for k, v := range tagToLogLevel {
+		nk, ok := prm.tl[k]
+		if ok {
+			v.SetLevel(nk)
+		} else {
+			v.SetLevel(prm.level)
+		}
+	}
+}
diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go
new file mode 100644
index 000000000..7373e538f
--- /dev/null
+++ b/pkg/util/testing/netmap_source.go
@@ -0,0 +1,36 @@
+package testing
+
+import (
+	"context"
+	"errors"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+var (
+	errInvalidDiff    = errors.New("invalid diff")
+	errNetmapNotFound = errors.New("netmap not found")
+)
+
+type TestNetmapSource struct {
+	Netmaps      map[uint64]*netmap.NetMap
+	CurrentEpoch uint64
+}
+
+func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+	if diff >= s.CurrentEpoch {
+		return nil, errInvalidDiff
+	}
+	return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
+}
+
+func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
+	if nm, found := s.Netmaps[epoch]; found {
+		return nm, nil
+	}
+	return nil, errNetmapNotFound
+}
+
+func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
+	return s.CurrentEpoch, nil
+}
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index f2f8881cf..39a420358 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -1,8 +1,10 @@
 package internal
 
 import (
+	cryptorand "crypto/rand"
 	"crypto/sha256"
 	"fmt"
+	"math/rand"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -14,14 +16,13 @@ import (
 	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
 	"git.frostfs.info/TrueCloudLab/tzhash/tz"
-	"golang.org/x/exp/rand"
 )
 
 func GeneratePayloadPool(count uint, size uint) [][]byte {
 	var pool [][]byte
 	for range count {
 		payload := make([]byte, size)
-		_, _ = rand.Read(payload)
+		_, _ = cryptorand.Read(payload)
 
 		pool = append(pool, payload)
 	}
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
index 4da23a295..fafe61eaa 100644
--- a/scripts/populate-metabase/internal/populate.go
+++ b/scripts/populate-metabase/internal/populate.go
@@ -31,13 +31,10 @@ func PopulateWithObjects(
 	for range count {
 		obj := factory()
-
-		id := []byte(fmt.Sprintf(
-			"%c/%c/%c",
+		id := fmt.Appendf(nil, "%c/%c/%c",
 			digits[rand.Int()%len(digits)],
 			digits[rand.Int()%len(digits)],
-			digits[rand.Int()%len(digits)],
-		))
+			digits[rand.Int()%len(digits)])
 
 		prm := meta.PutPrm{}
 		prm.SetObject(obj)
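Note on the populate.go change: fmt.Appendf (stdlib since Go 1.19) formats directly into a byte slice, so the []byte(fmt.Sprintf(...)) round-trip and its extra string-to-bytes copy go away. A minimal before/after sketch:

```go
package main

import "fmt"

func main() {
	// Before: format to a string, then copy it into a fresh []byte.
	a := []byte(fmt.Sprintf("%c/%c/%c", '1', '2', '3'))

	// After: append the formatted bytes straight onto a slice (nil starts empty).
	b := fmt.Appendf(nil, "%c/%c/%c", '1', '2', '3')

	fmt.Println(string(a) == string(b)) // true
}
```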